| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82–53.2k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    """Recursively print the structure of a (possibly nested) state dict."""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute the rows of a fused QKV weight/bias so its layout matches the
    [num_splits * num_heads * hidden_size, :] ordering expected downstream."""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
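

# Illustrative-only sketch (not from the original script): a tiny self-check of the
# permutation above, using made-up sizes (2 heads, 3 splits, hidden size 4 per head).
# The helper regroups rows from [num_heads, num_splits, hidden_size] to
# [num_splits, num_heads, hidden_size] while preserving the overall shape.
def _demo_fix_query_key_value_ordering():
    dummy = torch.arange(2 * 3 * 4 * 8, dtype=torch.float32).view(2 * 3 * 4, 8)
    fixed = fix_query_key_value_ordering(dummy, 2.0, 3, 2, 4)
    # Same shape, permuted row order.
    assert fixed.shape == dummy.shape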
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-LM GPT-2 state dict into the transformers GPT-2 layout."""
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
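

# Illustrative-only follow-up (not part of the original script): once main() has
# written config.json, the tokenizer files and pytorch_model.bin next to the
# checkpoint, the converted model should load with the standard transformers API:
#
#   from transformers import GPT2LMHeadModel
#   model = GPT2LMHeadModel.from_pretrained("/path/to/checkpoint/dir")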
####################################################################################################
if __name__ == "__main__":
    main()
####################################################################################################
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post renaming of basic JAX keys to pytorch."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
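

# Illustrative-only sketch (not from the original script): how the renaming above
# behaves on a made-up expert-layer key with a rank-3 kernel; sizes are arbitrary.
def _demo_rename_base_flax_keys():
    kernel = torch.zeros(8, 16, 32)  # (num_experts, d_in, d_out)
    key, tensor = rename_base_flax_keys(("mlp", "expert", "kernel"), kernel)
    assert key == ("mlp", "expert", "weight")
    # Last two axes are permuted to the torch (out, in) layout per expert.
    assert tensor.shape == (8, 32, 16)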
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
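

# Illustrative-only sketch (not from the original script) of the index built above when
# more than one shard is written; key names and numbers here are placeholders:
#
#   {
#       "metadata": {"total_size": 123456789},
#       "weight_map": {
#           "encoder/block/0/layer/0/attention/key/weight": "pytorch_model-00001-of-00010.bin",
#           "...": "..."
#       }
#   }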
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    """Quick end-to-end check that a converted checkpoint still generates text."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
"""simple docstring"""
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
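

# Illustrative-only note (not part of the original file): with the _LazyModule hook
# installed above, heavy submodules are only imported on first attribute access, so
#
#   from transformers.models.xlm_roberta import XLMRobertaConfig
#
# does not pull in the torch/TF/flax modeling files until they are actually needed.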
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along its last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
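

# Illustrative-only sketch (not from the original script): a uniform distribution over
# n outcomes should give entropy log(n); the size 4 here is arbitrary.
def _demo_entropy():
    p = torch.full((4,), 0.25)
    assert torch.isclose(entropy(p), torch.log(torch.tensor(4.0)))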
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logged row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores over a dataloader."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) until the score drops below the threshold,
    based on the head importance scores."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked head weights) and measure the speedup."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
    main()
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """Banker's algorithm: decide whether processes can be granted their remaining
    resource needs without driving the system into an unsafe (deadlock-prone) state."""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the allocated resources column-wise, per resource type."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus what is allocated."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Need matrix: maximum claim minus currently allocated resources, per process."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's original index to its need vector."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety algorithm, printing which processes can execute."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
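

# Illustrative-only usage (not from the original module), exercising the algorithm on
# the sample tables defined above; any truthy keyword (e.g. describe=True) also prints
# the pretty tables before the safety run:
#
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)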
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    """Map a GPTSAN Mesh-TensorFlow checkpoint onto the PyTorch parameter names."""
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
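

# Illustrative-only invocation (not from the original script); the script filename and
# paths here are placeholders:
#
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir /path/to/tf_ckpt --output gptsan.pt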
from __future__ import annotations
class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:  # full means every node has 0 or 2 children
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
    main()
import logging
import os
from copy import deepcopy
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

logger = logging.getLogger(__name__)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , ):
lowercase = bnb_quantization_config.load_in_abit
lowercase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            " make sure you have the latest version of `bitsandbytes` installed.")

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager.")
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert params to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda.")
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, "
            f"but we found {weights_location}")

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
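

# A minimal usage sketch of `load_and_quantize_model` (the checkpoint path and the
# model class below are illustrative assumptions, not part of this module):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     with init_empty_weights():
#         empty_model = MyModel(config)  # hypothetical model class
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     quantized_model = load_and_quantize_model(
#         empty_model,
#         bnb_quantization_config=bnb_config,
#         weights_location="/path/to/checkpoint",  # folder containing the weights
#         device_map="auto",
#     )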
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'.")

        special_dtypes = {}
        # skipped modules are kept in the non-quantized torch_dtype
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        # modules explicitly kept in full precision stay in float32
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check that we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit "
                        "the quantized model. If you want to dispatch the model on the CPU or the disk while keeping "
                        "these modules in `torch_dtype`, you need to pass a custom `device_map` to "
                        "`load_and_quantize_model`. Check "
                        "https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk "
                        "for more details.")
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be "
                        "converted to 8-bit")
        del device_map_without_some_modules
    return device_map
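

# Sketch of a custom `device_map` (module names are assumed for illustration only).
# As enforced above, placing modules on "cpu" or "disk" is only allowed for 8-bit
# quantization, never for 4-bit:
#
#     device_map = {
#         "transformer.wte": 0,
#         "transformer.h": 0,
#         "transformer.ln_f": 0,
#         "lm_head": "cpu",  # offloaded module, kept out of quantization
#     }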
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every `nn.Linear` in `model` (except `modules_to_not_convert`) by a bitsandbytes layer."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
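

# Minimal sketch of `replace_with_bnb_layers` on a toy model (assumes a working
# CUDA `bitsandbytes` install; the toy model and config values are made up):
#
#     import torch.nn as nn
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#
#     with init_empty_weights():
#         tiny = nn.Sequential(nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 8))
#
#     cfg = BnbQuantizationConfig(load_in_8bit=True)
#     tiny = replace_with_bnb_layers(tiny, cfg, modules_to_not_convert=["2"])
#     # every nn.Linear except child "2" is now a bnb.nn.Linear8bitLt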
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
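

# For a typical causal LM whose output head is tied to the input embeddings, the
# helper above usually returns the head module name (exact names depend on the
# architecture, so treat this as an illustrative assumption):
#
#     skip_modules = get_keys_to_not_convert(model)  # e.g. ["lm_head"]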
def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layer."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
from __future__ import annotations
class Matrix:
    """A matrix of int/float values supporting the usual linear-algebra operations."""

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float.")
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]))
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns))

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ])

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ])

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ])
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ])

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ])
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
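

# A short, self-contained demonstration of the Matrix class above (a sketch, not
# part of the original module):
def _demo_matrix() -> None:
    a = Matrix([[1, 2], [3, 4]])
    b = Matrix([[0, 1], [1, 0]])
    print(a.determinant())  # -2
    print((a * b).rows)  # [[2, 1], [4, 3]]
    print((a**2).rows)  # [[7, 10], [15, 22]]
    print(a.inverse().rows)  # adjugate scaled by 1/determinant (entries truncated to int)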
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline using any `ModelWithLMHead`."""

    # Prefix text used to give more state to XLNet/Transformer-XL style models on short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']")
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
    def _parse_and_tokenize(self, *args, **kwargs):
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework)
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length")
                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        ))
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)
        return records
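

# Hedged usage sketch of this pipeline through the high-level factory (the model
# name is an illustrative choice, not mandated by this file):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     outputs = generator("Hello, I'm a language model,", max_new_tokens=20)
#     print(outputs[0]["generated_text"])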
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png")
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16)
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )
        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png").resize((768, 768)))
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
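

# The DiffEdit flow exercised above, as a hedged three-step sketch (prompts are the
# ones used in the tests; everything else, e.g. `img`, is an assumption):
#
#     mask = pipe.generate_mask(image=img, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
#     latents = pipe.invert(prompt="a bowl of fruit", image=img, inpaint_strength=0.7).latents
#     edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents).images[0]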
fast27_timesteps = [
    999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266,
    244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0,
]
smart27_timesteps = [
    999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429,
    428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0,
]
smart50_timesteps = [
    999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840,
    820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399,
    350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88,
    77, 66, 55, 44, 33, 22, 11, 0,
]
smart100_timesteps = [
    999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956,
    951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892,
    887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820,
    813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733,
    724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613,
    612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440,
    439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88,
    44, 0,
]
smart185_timesteps = [
    999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970,
    968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939,
    936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905,
    902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867,
    864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827,
    824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784,
    780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737,
    733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685,
    681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627,
    622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563,
    557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483,
    474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352,
    351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131,
    88, 44, 0,
]
super27_timesteps = [
    999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899,
    874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0,
]
super40_timesteps = [
    999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907,
    900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600,
    599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0,
]
super100_timesteps = [
    999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955,
    951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907,
    903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815,
    808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688,
    677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528,
    514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340,
    320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66,
    33, 0,
]
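

# These hand-tuned schedules are meant to be passed to a diffusion pipeline that
# accepts an explicit `timesteps` list (as the DeepFloyd IF pipelines do); a
# sketch under that assumption:
#
#     images = pipe(prompt, timesteps=fast27_timesteps).images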
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
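

# A minimal sketch of a concrete subcommand built on this ABC (the command name
# and behavior below are invented for illustration):
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser):
#             # `parser` is the argparse subparsers action of the CLI entry point
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello")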
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks at the given `indent_level`, optionally bounded by prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a `key` (that maps an object to a string) to lower case and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` entries of `file`; return True if the file would change."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
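

# Example of the transformation `sort_objects_in_import` performs on a one-line
# import entry (a sketch; constants sort first, then classes, then functions):
#
#     before = '_import_structure["models.bert"] = ["BertModel", "BERT_CONSTANT", "load_tf_weights"]'
#     after = sort_objects_in_import(before)
#     # -> '_import_structure["models.bert"] = ["BERT_CONSTANT", "BertModel", "load_tf_weights"]'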
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    """Apply Coulomb's law: given three of force, charge1, charge2, distance,
    return the missing (zero) quantity as a name/value pair."""
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
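

# Worked example (a sketch, not part of the original module): two 1 C charges
# held 1 m apart experience a force of COULOMBS_CONSTANT newtons.
#
#     >>> coulombs_law(force=0, charge1=1, charge2=1, distance=1)
#     {'force': 8988000000.0}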
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
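

# With the `_LazyModule` registration above, importing from this package stays
# cheap: submodules are only loaded on first attribute access; e.g. (a sketch):
#
#     from transformers.models.layoutxlm import LayoutXLMProcessor  # resolved lazily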
from collections import deque
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : int ):
"""simple docstring"""
UpperCamelCase = process_name # process name
UpperCamelCase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCamelCase = arrival_time
UpperCamelCase = burst_time # remaining burst time
UpperCamelCase = 0 # total time of the process wait in ready queue
UpperCamelCase = 0 # time from arrival time to completion time
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : list[int] , UpperCamelCase__ : deque[Process] , UpperCamelCase__ : int , ):
"""simple docstring"""
UpperCamelCase = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCamelCase = time_slices
# unfinished process is in this ready_queue
UpperCamelCase = queue
# current time
UpperCamelCase = current_time
# finished process is in this sequence queue
UpperCamelCase = deque()
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def A ( self : Optional[Any] , UpperCamelCase__ : list[Process] ):
"""simple docstring"""
UpperCamelCase = []
for i in range(len(UpperCamelCase__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def A ( self : Dict , UpperCamelCase__ : list[Process] ):
"""simple docstring"""
UpperCamelCase = []
for i in range(len(UpperCamelCase__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def A ( self : int , UpperCamelCase__ : list[Process] ):
"""simple docstring"""
UpperCamelCase = []
for i in range(len(UpperCamelCase__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def A ( self : Optional[int] , UpperCamelCase__ : deque[Process] ):
"""simple docstring"""
return [q.burst_time for q in queue]
def A ( self : Any , UpperCamelCase__ : Process ):
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def A ( self : Union[str, Any] , UpperCamelCase__ : deque[Process] ):
"""simple docstring"""
UpperCamelCase = deque() # sequence deque of finished process
while len(UpperCamelCase__ ) != 0:
UpperCamelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(UpperCamelCase__ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCamelCase = 0
# set the process's turnaround time because it is finished
UpperCamelCase = self.current_time - cp.arrival_time
# set the completion time
UpperCamelCase = self.current_time
# add the process to queue that has finished queue
finished.append(UpperCamelCase__ )
self.finish_queue.extend(UpperCamelCase__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def A ( self : Union[str, Any] , UpperCamelCase__ : deque[Process] , UpperCamelCase__ : int ):
"""simple docstring"""
UpperCamelCase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(UpperCamelCase__ ) ):
UpperCamelCase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(UpperCamelCase__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCamelCase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(UpperCamelCase__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCamelCase = 0
# set the finish time
UpperCamelCase = self.current_time
# update the process' turnaround time because it is finished
UpperCamelCase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(UpperCamelCase__ )
self.finish_queue.extend(UpperCamelCase__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        """Run every queue but the last with round robin, then finish with FCFS."""
        # all queues except the last one use the round-robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has the first-come, first-served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
import doctest
_lowerCamelCase : Optional[Any] = Process("P1", 0, 53)
_lowerCamelCase : List[str] = Process("P2", 0, 17)
_lowerCamelCase : Optional[int] = Process("P3", 0, 68)
_lowerCamelCase : Dict = Process("P4", 0, 24)
_lowerCamelCase : Union[str, Any] = 3
_lowerCamelCase : int = [17, 25]
_lowerCamelCase : List[Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
_lowerCamelCase : Dict = Process("P1", 0, 53)
_lowerCamelCase : int = Process("P2", 0, 17)
_lowerCamelCase : Union[str, Any] = Process("P3", 0, 68)
_lowerCamelCase : str = Process("P4", 0, 24)
_lowerCamelCase : Optional[Any] = 3
_lowerCamelCase : Any = [17, 25]
_lowerCamelCase : str = deque([Pa, Pa, Pa, Pa])
_lowerCamelCase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowerCamelCase : int = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
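    # Hand-checked schedule for the workload above (author-added): queue 0 (RR, slice 17)
    # finishes P2 at t=34; queue 1 (RR, slice 25) finishes P4 at t=125; the final FCFS
    # stage completes P1 at t=136 and P3 at t=162, so the finish sequence is P2, P4, P1, P3.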
| 430
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 714
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    """Configuration class to store the configuration of a GPT-J model."""

    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
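# Minimal usage sketch (author-added, not part of the original file):
# config = GPTJConfig()            # defaults correspond to EleutherAI/gpt-j-6B
# onnx_config = GPTJOnnxConfig(config)
# list(onnx_config.inputs)         # ['input_ids', 'attention_mask']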
| 630
| 0
|
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = "0094702343221"

    print(is_sri_lankan_phone_number(phone))
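    # Spot checks against the pattern above (author-added):
    # is_sri_lankan_phone_number("+94773283048")  -> True   (international form)
    # is_sri_lankan_phone_number("0718382399")    -> True   (local 0-prefixed form)
    # is_sri_lankan_phone_number("0912343221")    -> False  (9 is not a valid 7x mobile prefix)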
| 33
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
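# Minimal usage sketch (author-added; the checkpoint name is illustrative):
# safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
#     "CompVis/stable-diffusion-safety-checker", from_pt=True
# )
# has_nsfw = safety_checker(clip_input)  # clip_input: NCHW float array of CLIP-preprocessed images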
| 608
| 0
|
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def _UpperCamelCase ( _A="train-batch.pt" ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_A , repo_type="""dataset""" )
_UpperCAmelCase = torch.load(_A , map_location=_A )
return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 19
|
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into one sentence per line (needed for ROUGE-Lsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 19
| 1
|
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Depth-first search returning the size of the subtree rooted at ``start``."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # a subtree of even size can be cut off by removing the edge above its root
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Mark every vertex whose subtree can be cut off into an even-sized component."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
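    # For this tree (author-added check): removing edges (1, 3) and (1, 6) splits the
    # 10-node tree into even-sized components, so the printed answer is 2.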
| 163
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
_SCREAMING_SNAKE_CASE = F"""https://www.google.com/search?q={query}&num=100"""
_SCREAMING_SNAKE_CASE = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
_SCREAMING_SNAKE_CASE = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
_SCREAMING_SNAKE_CASE = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
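    # Usage (author-added note): run the script with the search terms as command-line
    # arguments to open the top Google result in the default browser; with no
    # arguments it prompts for a query interactively.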
| 163
| 1
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)

        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
    def test_tokenizer_integration(self):
# fmt: off
a_ : Optional[Any] = {'input_ids': [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a_,
            model_name="google/bigbird-roberta-base",
            revision="215c99f1600e06f83acce68422f2035b2b5c3510",
        )
| 705
|
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k):
    """Map a TF Pegasus parameter name onto the corresponding Bart-style state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def SCREAMING_SNAKE_CASE_ ( __A : Dict="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
"""simple docstring"""
a_ : List[Any] = tf.train.list_variables(__A )
a_ : Optional[Any] = {}
a_ : int = ['Adafactor', 'global_step']
for name, shape in tqdm(__A , desc='converting tf checkpoint to dict' ):
a_ : Optional[int] = any(pat in name for pat in ignore_name )
if skip_key:
continue
a_ : List[str] = tf.train.load_variable(__A , __A )
a_ : Optional[Any] = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
UpperCAmelCase_ : str = parser.parse_args()
if args.save_dir is None:
UpperCAmelCase_ : str = Path(args.tf_ckpt_path).parent.name
UpperCAmelCase_ : Optional[int] = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 443
| 0
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two minterm strings if they differ in exactly one position, else return False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """One merging stage over the minterm strings; unmerged strings are kept as prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: list[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # first take the essential prime implicants: columns covered by exactly one row
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # then greedily pick the row covering the most remaining columns
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 101
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
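# Minimal usage sketch (author-added):
# config = GitConfig()                 # builds a nested GitVisionConfig with defaults
# config.vision_config.image_size      # 224
# restored = GitConfig.from_dict(config.to_dict())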
| 101
| 1
|
"""simple docstring"""
def twos_complement(number: int) -> str:
    """Return the two's complement of a non-positive integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 614
|
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
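# Note (author-added): with the default conv_stride (5, 2, 2, 2, 2, 2, 2) the feature
# extractor downsamples raw audio by math.prod((5, 2, 2, 2, 2, 2, 2)) == 320, i.e. one
# encoder frame per 320 input samples (20 ms at a 16 kHz sampling rate).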
| 614
| 1
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires it
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
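# Example invocation (the script name and paths are placeholders, not taken
# from this source):
# python convert_longformer_qa_checkpoint.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./checkpoint.ckpt \
#     --pytorch_dump_folder_path ./longformer-base-4096-qa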
| 57
|
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb `number_of_steps` stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
    import doctest

    doctest.testmod()
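# Worked example: climb_stairs(4) == 5, since the loop produces the
# Fibonacci-style sequence 1, 2, 3, 5 for 1, 2, 3, 4 steps.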
| 104
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
'''facebook/data2vec-vision-base-ft''': (
'''https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'''
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
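# Usage sketch (hedged, not part of the original file): the `inputs` mapping
# above declares the dynamic ONNX axes, so
# Data2VecVisionOnnxConfig(Data2VecVisionConfig()).inputs evaluates to
# OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]).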
| 590
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50_265,
        d_model=1_024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4_096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
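# Illustrative sketch: `attribute_map` lets the generic names alias the
# decoder-specific ones, e.g.
#
# >>> config = TrOCRConfig(d_model=512)
# >>> config.hidden_size  # resolved to config.d_model via attribute_map
# 512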
| 590
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowercase__ : List[Any] = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Union[str, Any] = ['''BeitFeatureExtractor''']
lowercase__ : Dict = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Dict = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 123
| 0
|
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the squares of the first `n` natural numbers."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
    print(f"{solution() = }")
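# Sanity check: solution(10) == 55**2 - 385 == 2640, and the default
# solution() with n=100 returns 25164150 (the Project Euler #6 answer).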
| 72
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = WavaVecaFeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = WavaVecaProcessor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = WavaVecaFeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = WavaVecaProcessor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(WavaVecaConfig, WavaVecaProcessor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = WavaVecaProcessor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = WavaVecaProcessor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = WavaVecaProcessor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

            new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
            # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
            self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 72
| 1
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """Collect the shapes of every tensor in a (possibly nested) tree of containers."""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
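# Example: _fetch_dims({"a": torch.zeros(2, 3), "b": [torch.zeros(4)]}) returns
# [torch.Size([2, 3]), torch.Size([4])], flattening the tree in traversal order.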
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat (row-major) index into a multi-dimensional index over `dims`."""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
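# Example: _flat_idx_to_idx(5, (2, 3)) == (1, 2), i.e. the row-major coordinates
# of flat index 5 in a 2x3 grid.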
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges/end_edges indicate whether, from each dimension inward, the
    # start/end index sits at the top/bottom edge of the corresponding subtree.
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(start_idx, end_idx, batch_dims)

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """
    Run `layer` over `inputs` in chunks along the (shared) leading `no_batch_dims`
    batch dimensions and stitch the chunk outputs back together.
    """
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(num_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
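# A minimal usage sketch (hypothetical shapes; `layer` is any callable taking
# keyword tensor arguments that share their leading batch dimensions):
#
# >>> layer = lambda x, y: {"out": x + y}
# >>> x, y = torch.randn(4, 8, 16), torch.randn(4, 8, 16)
# >>> out = chunk_layer(layer, {"x": x, "y": y}, chunk_size=8, no_batch_dims=2)
# >>> out["out"].shape   # the 4*8=32 flattened batch rows are processed 8 at a time
# torch.Size([4, 8, 16])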
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(representative_fn, args, min_chunk_size)
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
| 419
|
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple:
    """Return the two roots of a*x**2 + b*x + c = 0, which may be complex."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
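# Examples: quadratic_roots(a=5, b=6, c=1) returns (-0.2, -1.0), and
# quadratic_roots(1, 0, 1) returns (1j, -1j) since the discriminant is negative.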
| 419
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : int = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 615
|
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Return the numerator of the fraction immediately to the left of numerator/denominator among fractions with denominators up to `limit`."""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
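# Examples: solution(3, 7, 8) == 2, since 2/5 is the fraction immediately left
# of 3/7 for denominators up to 8; the default call returns 428570
# (the Project Euler #71 answer, from 428570/999997).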
| 615
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
snake_case_ : Tuple = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ['LayoutLMv2FeatureExtractor']
snake_case_ : str = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 195
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 195
| 1
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''nvidia/segformer-b0-finetuned-ade-512-512''': (
'''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'''
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
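# A minimal usage sketch (hypothetical values; `SegformerOnnxConfig(config)`
# follows the generic `OnnxConfig(config, task="default")` constructor):
#
# >>> config = SegformerConfig()
# >>> onnx_config = SegformerOnnxConfig(config)
# >>> list(onnx_config.inputs.keys())
# ['pixel_values']
# >>> onnx_config.default_onnx_opset, onnx_config.atol_for_validation
# (12, 0.0001)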
| 28
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''OwlViTFeatureExtractor''']
A_ = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28
| 1
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def A_ ( snake_case , snake_case ):
# ===== initialization =====
SCREAMING_SNAKE_CASE:Optional[Any] = Mock()
SCREAMING_SNAKE_CASE:Optional[int] = conn, Mock()
SCREAMING_SNAKE_CASE:List[Any] = iter([1, None] )
SCREAMING_SNAKE_CASE:Optional[int] = lambda snake_case : next(snake_case )
# ===== invoke =====
send_file(filename="mytext.txt" , testing=snake_case )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 143
|
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
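# Examples: format_time(3661) == "1:01:01" and format_time(75) == "01:15".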
def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        "Closes the progress bar."
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
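# Minimal standalone sketch of NotebookProgressBar outside a Trainer run. This assumes
# an IPython display backend is available (the bar renders through IPython.display);
# the loop values are illustrative. In a notebook, transformers normally selects
# NotebookProgressCallback for the Trainer automatically.
#
# bar = NotebookProgressBar(100, prefix="Demo")
# for step in range(1, 101):
#     bar.update(step, comment=f"step {step}")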
| 143
| 1
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 719
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
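# Why "adapt act apte" becomes ["adapt", "act", "ap@@", "te"] (hand-traced from the
# fixture above): the merges "a p", "ap t</w>", "a d" and "ad apt</w>" collapse "adapt"
# into a single token, and "a c" plus "ac t</w>" do the same for "act"; "apte" only
# merges as far as "ap" + "te", and "@@" marks a token that does not end a word.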
| 501
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token,
            unk_token=unk_token, pad_token=pad_token, mask_token=mask_token,
            sep_token=sep_token, **kwargs,
        )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
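# Shape of the outputs above for token id sequences A and B (the actual special-token
# ids depend on the loaded vocab; <s>/</s> per the defaults):
#   build_inputs_with_special_tokens(A)    -> [cls] + A + [sep]
#   build_inputs_with_special_tokens(A, B) -> [cls] + A + [sep] + B + [sep]
# and create_token_type_ids_from_sequences marks "[cls] A [sep]" with 0s, "B [sep]" with 1s.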
| 523
|
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number, i.e. the n-th positive integer whose only
    prime factors are 2, 3 or 5 (the sequence starts at 1)."""
    ugly_nums = [1]
    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
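# The sequence produced starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... (7 and 11 are
# skipped because they have prime factors other than 2, 3 and 5), so ugly_numbers(10) == 12.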
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
| 613
| 0
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
        use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
        causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4,
        summary_type="last", use_proj=None, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm, causal=self.causal, n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range, summary_type=self.summary_type,
            use_proj=self.use_proj,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels,
            cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels,
            cls_index=sequence_labels, is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
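# The integration test above, traced by hand (the model id and expected values come
# straight from the test, not re-derived):
# model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
# output = model(torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]))[0]
# output.shape == torch.Size([1, 11, 768]) and output[:, :3, :3] matches expected_slice.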
| 706
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu",
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10,
        head_in_index=-1, **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
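# Minimal usage sketch for the config above; the override value is illustrative and
# the attribute names mirror the __init__ arguments:
# config = GLPNConfig(decoder_hidden_size=32)
# assert config.model_type == "glpn" and config.decoder_hidden_size == 32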
| 510
| 0
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 90
|
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only the bits needed for checkpoint loading
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase : List[str] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
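# Example invocation (all paths and the script filename are placeholders):
# python convert_longformer_qa_checkpoint.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#     --pytorch_dump_folder_path ./longformer-qa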
| 495
| 0
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Returns a zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Returns a unit basis vector with a 1 at index `pos` (indexing from 0)."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Computes the axpy operation: scalar * x + y."""
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Returns a random vector of size n with integer components between a and b."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")
    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Returns a square zero matrix of order n."""
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Returns a random matrix with integer components between a and b."""
    random.seed(None)
    matrix = [[random.randint(a, b) for _ in range(width)] for _ in range(height)]
    return Matrix(matrix, width, height)
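# Hand-checked usage sketch for the classes above:
# v, w = Vector([1, 2, 3]), Vector([4, 5, 6])
# str(v + w)  -> "(5,7,9)"
# v * w       -> 32 (the dot product 4 + 10 + 18)
# Matrix([[1, 2], [3, 4]], 2, 2).determinant()  -> -2 (1*4 - 2*3)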
| 202
|
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCamelCase__ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
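# Hand-traced example of the renaming logic; the input key format is an assumption
# about the original YOSO checkpoint layout:
# rename_key("model.transformer_0.mha.W_q.weight")
#   -> "yoso.encoder.layer.0.attention.self.query.weight"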
| 202
| 1
|
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCAmelCase = {"input_ids": [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase,
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
| 611
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 611
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
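# Tokenization tests for MobileBERT: the tokenizer is plain BERT WordPiece, so the
# suite mirrors the BERT tokenizer tests (basic tokenizer, WordPiece, offsets, etc.).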
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 720
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
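# LayoutLM reuses the BERT WordPiece tokenizer, so only a small smoke test is needed here.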
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
| 493
| 0
|
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
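# BigBird ships a SentencePiece tokenizer; these tests run against the small
# sentencepiece fixture model bundled with the test suite.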
_SCREAMING_SNAKE_CASE = "▁"
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BigBirdConfig, BigBirdModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_special_tokens(self):
        """Decoding [MASK] should keep it glued to the preceding token, matching the original tokenizer."""
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    @slow
    def test_tokenizer_integration(self):
# fmt: off
_lowerCAmelCase = {"input_ids": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 18
|
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power in watts: P = S * pf."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power in VAR: Q = S * sqrt(1 - pf**2)."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
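# Worked example: a 100 VA load at power factor 0.8 draws
# real_power(100, 0.8) == 80.0 W and reactive_power(100, 0.8) == 60.0 VAR,
# since sqrt(1 - 0.8**2) = 0.6.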
if __name__ == "__main__":
import doctest
doctest.testmod()
| 568
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
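# The tester below just bundles the constructor kwargs for GLPNImageProcessor so the
# shared image-processing test mixin can build processors with a known configuration.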
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 719
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
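# Test-suite helpers for the `datasets` library: env-flag parsing, skip decorators for
# optional dependencies, offline-mode simulation, and async subprocess utilities.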
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current `pytest-xdist` worker (0 when not distributed)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Derive a unique torch.distributed port per xdist worker so parallel tests don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 253
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 171
|
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in `s` where `pattern` starts. Runs in O(len(s) * len(pattern))."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 171
| 1
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
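# Standard lazy-import scaffold: modeling classes are only registered when torch is
# installed, and at runtime the module is swapped for a _LazyModule proxy.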
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 721
|
def triangle_number_generator():
    """Generate the triangle number series: 1, 3, 6, 10, ..."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count divisors of `n` via its prime factorization: prod(multiplicity + 1)."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Project Euler 12: first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 221
| 0
|
import re
from filelock import FileLock
try:
import nltk
__A = True
except (ImportError, ModuleNotFoundError):
__A = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences with nltk and rejoin them newline-separated (needed for rougeLsum)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 68
|
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1 (p itself must be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
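# Example: lucas_lehmer_test(7) is True since 2**7 - 1 = 127 is prime, while
# lucas_lehmer_test(11) is False since 2**11 - 1 = 2047 = 23 * 89.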
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 576
| 0
|
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 133
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
_A = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]
    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 133
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
elif weight_type == "running_mean":
_UpperCamelCase = value
elif weight_type == "running_var":
_UpperCamelCase = value
elif weight_type == "num_batches_tracked":
_UpperCamelCase = value
elif weight_type == "weight_ih_l0":
_UpperCamelCase = value
elif weight_type == "weight_hh_l0":
_UpperCamelCase = value
elif weight_type == "bias_ih_l0":
_UpperCamelCase = value
elif weight_type == "bias_hh_l0":
_UpperCamelCase = value
elif weight_type == "weight_ih_l1":
_UpperCamelCase = value
elif weight_type == "weight_hh_l1":
_UpperCamelCase = value
elif weight_type == "bias_ih_l1":
_UpperCamelCase = value
elif weight_type == "bias_hh_l1":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = []
if model_name == "encodec_24khz" or "encodec_32khz":
_UpperCamelCase = MAPPING_24K
elif model_name == "encodec_48khz":
_UpperCamelCase = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(__snake_case, __snake_case ):
logger.info(F'''{name} was ignored''' )
continue
_UpperCamelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_UpperCamelCase , _UpperCamelCase = key.split('''.*.''' )
if prefix in name and suffix in name:
_UpperCamelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''', __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "weight_ih_l0" in name:
_UpperCamelCase = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
_UpperCamelCase = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
_UpperCamelCase = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
_UpperCamelCase = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
_UpperCamelCase = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
_UpperCamelCase = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
_UpperCamelCase = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
_UpperCamelCase = '''bias_hh_l1'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
elif "running_mean" in name:
_UpperCamelCase = '''running_mean'''
elif "running_var" in name:
_UpperCamelCase = '''running_var'''
elif "num_batches_tracked" in name:
_UpperCamelCase = '''num_batches_tracked'''
else:
_UpperCamelCase = None
set_recursively(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=None, __snake_case=None, ) -> Dict:
"""simple docstring"""
if config_path is not None:
_UpperCamelCase = EncodecConfig.from_pretrained(__snake_case )
else:
_UpperCamelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_UpperCamelCase = [8, 5, 4, 4]
_UpperCamelCase = [2.2]
_UpperCamelCase = 64
_UpperCamelCase = 3_20_00
_UpperCamelCase = 20_48
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
elif model_name == "encodec_48khz":
_UpperCamelCase = [8, 5, 4, 2]
_UpperCamelCase = [3.0, 6.0, 12.0, 24.0]
_UpperCamelCase = 4_80_00
_UpperCamelCase = 2
_UpperCamelCase = False
_UpperCamelCase = '''time_group_norm'''
_UpperCamelCase = True
_UpperCamelCase = 1.0
_UpperCamelCase = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_UpperCamelCase = EncodecModel(__snake_case )
_UpperCamelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
feature_extractor.save_pretrained(__snake_case )
_UpperCamelCase = torch.load(__snake_case )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_UpperCamelCase = original_checkpoint['''best_state''']
recursively_load_weights(__snake_case, __snake_case, __snake_case )
model.save_pretrained(__snake_case )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(__snake_case )
model.push_to_hub(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_a = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
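# A hypothetical invocation (the script name and paths below are placeholders,
# not taken from this file):
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf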
| 19
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'wavlm'
def __init__( self , __a=32 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.1 , __a=0.1 , __a=0.02 , __a=1e-5 , __a="group" , __a="gelu" , __a=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __a=(5, 2, 2, 2, 2, 2, 2) , __a=(10, 3, 3, 3, 3, 2, 2) , __a=False , __a=1_28 , __a=16 , __a=3_20 , __a=8_00 , __a=False , __a=True , __a=0.05 , __a=10 , __a=2 , __a=0.0 , __a=10 , __a=3_20 , __a=2 , __a=0.1 , __a=1_00 , __a=2_56 , __a=2_56 , __a=0.1 , __a="mean" , __a=False , __a=False , __a=2_56 , __a=(5_12, 5_12, 5_12, 5_12, 15_00) , __a=(5, 3, 3, 1, 1) , __a=(1, 2, 3, 1, 1) , __a=5_12 , __a=80 , __a=0 , __a=1 , __a=2 , __a=False , __a=3 , __a=2 , __a=3 , __a=None , **__a , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a)
_UpperCamelCase = hidden_size
_UpperCamelCase = feat_extract_norm
_UpperCamelCase = feat_extract_activation
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = conv_bias
_UpperCamelCase = num_buckets
_UpperCamelCase = max_bucket_distance
_UpperCamelCase = num_conv_pos_embeddings
_UpperCamelCase = num_conv_pos_embedding_groups
_UpperCamelCase = len(self.conv_dim)
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = num_attention_heads
_UpperCamelCase = hidden_dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = feat_proj_dropout
_UpperCamelCase = final_dropout
_UpperCamelCase = layerdrop
_UpperCamelCase = layer_norm_eps
_UpperCamelCase = initializer_range
_UpperCamelCase = num_ctc_classes
_UpperCamelCase = vocab_size
_UpperCamelCase = do_stable_layer_norm
_UpperCamelCase = use_weighted_layer_sum
_UpperCamelCase = classifier_proj_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCamelCase = apply_spec_augment
_UpperCamelCase = mask_time_prob
_UpperCamelCase = mask_time_length
_UpperCamelCase = mask_time_min_masks
_UpperCamelCase = mask_feature_prob
_UpperCamelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCamelCase = num_codevectors_per_group
_UpperCamelCase = num_codevector_groups
_UpperCamelCase = contrastive_logits_temperature
_UpperCamelCase = num_negatives
_UpperCamelCase = codevector_dim
_UpperCamelCase = proj_codevector_dim
_UpperCamelCase = diversity_loss_weight
# ctc loss
_UpperCamelCase = ctc_loss_reduction
_UpperCamelCase = ctc_zero_infinity
# adapter
_UpperCamelCase = add_adapter
_UpperCamelCase = adapter_kernel_size
_UpperCamelCase = adapter_stride
_UpperCamelCase = num_adapter_layers
_UpperCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = list(__a)
_UpperCamelCase = xvector_output_dim
@property
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
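        # With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this product is
        # 5 * 2**6 = 320, i.e. one output frame per 320 raw input samples.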
return functools.reduce(operator.mul , self.conv_stride , 1)
| 19
| 1
|
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : torch.FloatTensor
class __magic_name__ (nn.Module ):
'''simple docstring'''
def __init__( self:Dict , _a:Any=3 , _a:Tuple=3 , _a:List[str]=("DownEncoderBlock2D",) , _a:Tuple=(64,) , _a:List[str]=2 , _a:Any=32 , _a:List[Any]="silu" , _a:List[Any]=True , ):
super().__init__()
snake_case__ = layers_per_block
snake_case__ = torch.nn.Convad(
_a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ = None
snake_case__ = nn.ModuleList([] )
# down
snake_case__ = block_out_channels[0]
for i, down_block_type in enumerate(_a ):
snake_case__ = output_channel
snake_case__ = block_out_channels[i]
snake_case__ = i == len(_a ) - 1
snake_case__ = get_down_block(
_a , num_layers=self.layers_per_block , in_channels=_a , out_channels=_a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , )
self.down_blocks.append(_a )
# mid
snake_case__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , )
# out
snake_case__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_a , eps=1e-6 )
snake_case__ = nn.SiLU()
snake_case__ = 2 * out_channels if double_z else out_channels
snake_case__ = nn.Convad(block_out_channels[-1] , _a , 3 , padding=1 )
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Any ):
snake_case__ = x
snake_case__ = self.conv_in(_a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_a:str ):
def custom_forward(*_a:Any ):
return module(*_a )
return custom_forward
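            # torch.utils.checkpoint recomputes activations during the backward
            # pass to trade compute for memory; use_reentrant=False (available in
            # PyTorch >= 1.11) selects the non-reentrant autograd implementation.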
# down
if is_torch_version('''>=''' , '''1.11.0''' ):
for down_block in self.down_blocks:
snake_case__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) , _a , use_reentrant=_a )
# middle
snake_case__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , use_reentrant=_a )
else:
for down_block in self.down_blocks:
snake_case__ = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a )
# middle
snake_case__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _a )
else:
# down
for down_block in self.down_blocks:
snake_case__ = down_block(_a )
# middle
snake_case__ = self.mid_block(_a )
# post-process
snake_case__ = self.conv_norm_out(_a )
snake_case__ = self.conv_act(_a )
snake_case__ = self.conv_out(_a )
return sample
class __magic_name__ (nn.Module ):
'''simple docstring'''
def __init__( self:Union[str, Any] , _a:Optional[Any]=3 , _a:Optional[int]=3 , _a:List[str]=("UpDecoderBlock2D",) , _a:Union[str, Any]=(64,) , _a:str=2 , _a:Optional[int]=32 , _a:Optional[int]="silu" , _a:str="group" , ):
super().__init__()
snake_case__ = layers_per_block
snake_case__ = nn.Convad(
_a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
snake_case__ = None
snake_case__ = nn.ModuleList([] )
snake_case__ = in_channels if norm_type == '''spatial''' else None
# mid
snake_case__ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , )
# up
snake_case__ = list(reversed(_a ) )
snake_case__ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_a ):
snake_case__ = output_channel
snake_case__ = reversed_block_out_channels[i]
snake_case__ = i == len(_a ) - 1
snake_case__ = get_up_block(
_a , num_layers=self.layers_per_block + 1 , in_channels=_a , out_channels=_a , prev_output_channel=_a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , resnet_time_scale_shift=_a , )
self.up_blocks.append(_a )
snake_case__ = output_channel
# out
if norm_type == "spatial":
snake_case__ = SpatialNorm(block_out_channels[0] , _a )
else:
snake_case__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_a , eps=1e-6 )
snake_case__ = nn.SiLU()
snake_case__ = nn.Convad(block_out_channels[0] , _a , 3 , padding=1 )
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] , _a:Tuple , _a:List[Any]=None ):
snake_case__ = z
snake_case__ = self.conv_in(_a )
snake_case__ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_a:List[str] ):
def custom_forward(*_a:Union[str, Any] ):
return module(*_a )
return custom_forward
if is_torch_version('''>=''' , '''1.11.0''' ):
# middle
snake_case__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , _a , use_reentrant=_a )
snake_case__ = sample.to(_a )
# up
for up_block in self.up_blocks:
snake_case__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) , _a , _a , use_reentrant=_a )
else:
# middle
snake_case__ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , _a )
snake_case__ = sample.to(_a )
# up
for up_block in self.up_blocks:
snake_case__ = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a , _a )
else:
# middle
snake_case__ = self.mid_block(_a , _a )
snake_case__ = sample.to(_a )
# up
for up_block in self.up_blocks:
snake_case__ = up_block(_a , _a )
# post-process
if latent_embeds is None:
snake_case__ = self.conv_norm_out(_a )
else:
snake_case__ = self.conv_norm_out(_a , _a )
snake_case__ = self.conv_act(_a )
snake_case__ = self.conv_out(_a )
return sample
class __magic_name__ (nn.Module ):
'''simple docstring'''
def __init__( self:Dict , _a:Optional[int] , _a:str , _a:Optional[Any] , _a:str=None , _a:Optional[Any]="random" , _a:Union[str, Any]=False , _a:Union[str, Any]=True ):
super().__init__()
snake_case__ = n_e
snake_case__ = vq_embed_dim
snake_case__ = beta
snake_case__ = legacy
snake_case__ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
snake_case__ = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) )
snake_case__ = self.used.shape[0]
snake_case__ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
snake_case__ = self.re_embed
snake_case__ = self.re_embed + 1
print(
F"""Remapping {self.n_e} indices to {self.re_embed} indices. """
F"""Using {self.unknown_index} for unknown indices.""" )
else:
snake_case__ = n_e
snake_case__ = sane_index_shape
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[int] ):
snake_case__ = inds.shape
assert len(_a ) > 1
snake_case__ = inds.reshape(ishape[0] , -1 )
snake_case__ = self.used.to(_a )
snake_case__ = (inds[:, :, None] == used[None, None, ...]).long()
snake_case__ = match.argmax(-1 )
snake_case__ = match.sum(2 ) < 1
if self.unknown_index == "random":
snake_case__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
snake_case__ = self.unknown_index
return new.reshape(_a )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:List[Any] ):
snake_case__ = inds.shape
assert len(_a ) > 1
snake_case__ = inds.reshape(ishape[0] , -1 )
snake_case__ = self.used.to(_a )
if self.re_embed > self.used.shape[0]: # extra token
snake_case__ = 0 # simply set to zero
snake_case__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _a )
return back.reshape(_a )
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Tuple ):
# reshape z -> (batch, height, width, channel) and flatten
snake_case__ = z.permute(0 , 2 , 3 , 1 ).contiguous()
snake_case__ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
snake_case__ = torch.argmin(torch.cdist(_a , self.embedding.weight ) , dim=1 )
snake_case__ = self.embedding(_a ).view(z.shape )
snake_case__ = None
snake_case__ = None
# compute loss for embedding
if not self.legacy:
snake_case__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
snake_case__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
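        # (straight-through estimator: the forward pass returns the quantized z_q,
        # while the backward pass routes gradients directly to the encoder output z)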
snake_case__ = z + (z_q - z).detach()
# reshape back to match original input shape
snake_case__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
snake_case__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
snake_case__ = self.remap_to_used(_a )
snake_case__ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
snake_case__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Tuple , _a:Optional[Any] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
snake_case__ = indices.reshape(shape[0] , -1 ) # add batch axis
snake_case__ = self.unmap_to_all(_a )
snake_case__ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
snake_case__ = self.embedding(_a )
if shape is not None:
snake_case__ = z_q.view(_a )
# reshape back to match original input shape
snake_case__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __magic_name__ (snake_case_ ):
'''simple docstring'''
def __init__( self:Union[str, Any] , _a:Optional[Any] , _a:str=False ):
snake_case__ = parameters
snake_case__ , snake_case__ = torch.chunk(_a , 2 , dim=1 )
snake_case__ = torch.clamp(self.logvar , -30.0 , 20.0 )
snake_case__ = deterministic
snake_case__ = torch.exp(0.5 * self.logvar )
snake_case__ = torch.exp(self.logvar )
if self.deterministic:
snake_case__ = snake_case__ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
snake_case__ = randn_tensor(
self.mean.shape , generator=_a , device=self.parameters.device , dtype=self.parameters.dtype )
snake_case__ = self.mean + self.std * sample
return x
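    # The method below computes the KL divergence of a diagonal Gaussian
    # N(mean, var) from the standard normal, 0.5 * sum(mean^2 + var - 1 - log var),
    # or from another diagonal Gaussian when `other` is given.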
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:Union[str, Any]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:int , _a:Optional[int]=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
snake_case__ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_a )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
return self.mean
| 718
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class __magic_name__ (snake_case_ ):
'''simple docstring'''
__lowercase : Tuple = 'xlm-roberta'
def __init__( self:Dict , _a:List[Any]=3_05_22 , _a:Optional[Any]=7_68 , _a:Union[str, Any]=12 , _a:str=12 , _a:Union[str, Any]=30_72 , _a:str="gelu" , _a:List[Any]=0.1 , _a:List[str]=0.1 , _a:Dict=5_12 , _a:Optional[int]=2 , _a:Optional[Any]=0.02 , _a:List[str]=1e-12 , _a:Dict=1 , _a:Optional[Any]=0 , _a:str=2 , _a:Optional[int]="absolute" , _a:List[str]=True , _a:List[Any]=None , **_a:str , ):
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = hidden_act
snake_case__ = intermediate_size
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = max_position_embeddings
snake_case__ = type_vocab_size
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = position_embedding_type
snake_case__ = use_cache
snake_case__ = classifier_dropout
class __magic_name__ (snake_case_ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
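        # The dynamic axes tell the ONNX exporter which input dimensions may vary
        # at inference time (batch size, choice count for multiple choice, sequence length).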
if self.task == "multiple-choice":
snake_case__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
snake_case__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 208
| 0
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class SCREAMING_SNAKE_CASE_ ( list ):
def __lt__( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self : int , lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
return self[-1] == other[-1]
def lowercase( UpperCamelCase_ ) -> list:
'''simple docstring'''
UpperCamelCase = []
# sort into stacks
for element in collection:
UpperCamelCase = Stack([element] )
UpperCamelCase = bisect_left(UpperCamelCase_ , UpperCamelCase_ )
if i != len(UpperCamelCase_ ):
stacks[i].append(UpperCamelCase_ )
else:
stacks.append(UpperCamelCase_ )
# use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks) )
return collection
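# e.g. patience_sort([1, 9, 5, 21, 17, 6]) returns [1, 5, 6, 9, 17, 21]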
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""").strip()
_SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
| 537
|
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Any:
'''simple docstring'''
# Initialise PyTorch model
UpperCamelCase = MobileBertConfig.from_json_file(UpperCamelCase_ )
print(f"""Building PyTorch model from configuration: {config}""" )
UpperCamelCase = MobileBertForPreTraining(UpperCamelCase_ )
# Load weights from tf checkpoint
UpperCamelCase = load_tf_weights_in_mobilebert(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , UpperCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--mobilebert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained MobileBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
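# A hypothetical invocation (the script name and paths are placeholders,
# not taken from this file):
#   python convert_mobilebert_checkpoint.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin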
| 537
| 1
|
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = 0
while number > 0:
snake_case_ = number % 10
sum_of_digits += last_digit
snake_case_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowerCamelCase__ ( _A = 100 ):
'''simple docstring'''
    snake_case_ = factorial(num )
    snake_case_ = split_and_add(nfact )
return result
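# e.g. solution(10): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27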
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 700
|
def lowerCamelCase__ ( _A = 600851475143 ):
'''simple docstring'''
try:
snake_case_ = int(_A )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
snake_case_ = 2
snake_case_ = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
snake_case_ = i
while n % i == 0:
snake_case_ = n // i
i += 1
return int(_A )
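# For the default n = 600851475143 this returns 6857, its largest prime factor.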
if __name__ == "__main__":
print(f'''{solution() = }''')
| 139
| 0
|
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowercase__ : Dict , lowercase__ : int=1_3 , lowercase__ : Dict=7 , lowercase__ : Optional[int]=True , lowercase__ : Union[str, Any]=True , lowercase__ : Optional[Any]=True , lowercase__ : Optional[int]=True , lowercase__ : List[str]=9_9 , lowercase__ : Optional[Any]=3_2 , lowercase__ : Optional[Any]=2 , lowercase__ : Any=4 , lowercase__ : List[str]=3_7 , lowercase__ : Dict="gelu" , lowercase__ : Dict=0.1 , lowercase__ : Dict=0.1 , lowercase__ : Tuple=5_1_2 , lowercase__ : str=1_6 , lowercase__ : int=2 , lowercase__ : Any=0.0_2 , lowercase__ : Union[str, Any]=3 , lowercase__ : str=4 , lowercase__ : Union[str, Any]=None , ):
__lowercase : str = parent
__lowercase : int = 1_3
__lowercase : List[str] = 7
__lowercase : Any = True
__lowercase : Dict = True
__lowercase : List[str] = True
__lowercase : List[Any] = True
__lowercase : Any = 9_9
__lowercase : Union[str, Any] = 3_8_4
__lowercase : List[Any] = 2
__lowercase : Any = 4
__lowercase : Union[str, Any] = 3_7
__lowercase : List[str] = "gelu"
__lowercase : Tuple = 0.1
__lowercase : Optional[Any] = 0.1
__lowercase : int = 5_1_2
__lowercase : Union[str, Any] = 1_6
__lowercase : List[Any] = 2
__lowercase : Optional[int] = 0.0_2
__lowercase : Optional[int] = 3
__lowercase : List[str] = 4
__lowercase : Any = 1_2_8
__lowercase : str = 2
__lowercase : Tuple = 9
__lowercase : Union[str, Any] = 1
__lowercase : Tuple = None
def snake_case ( self : Tuple ):
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : List[Any] = None
if self.use_input_mask:
__lowercase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Any = None
if self.use_token_type_ids:
__lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Tuple = None
__lowercase : Optional[int] = None
__lowercase : str = None
if self.use_labels:
__lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Dict = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self : Tuple , lowercase__ : Optional[Any] , lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : List[str] , lowercase__ : int , lowercase__ : str , lowercase__ : Any ):
__lowercase : Union[str, Any] = TFConvBertModel(config=lowercase__ )
__lowercase : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
__lowercase : str = [input_ids, input_mask]
__lowercase : str = model(lowercase__ )
__lowercase : Tuple = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[str] , lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : List[str] , lowercase__ : int , lowercase__ : Any , lowercase__ : List[str] , lowercase__ : Any ):
__lowercase : Optional[int] = TFConvBertForMaskedLM(config=lowercase__ )
__lowercase : Optional[int] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__lowercase : str = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : Dict , lowercase__ : str , lowercase__ : Dict , lowercase__ : Dict , lowercase__ : int , lowercase__ : str , lowercase__ : Any , lowercase__ : Dict ):
__lowercase : Optional[Any] = self.num_labels
__lowercase : Optional[int] = TFConvBertForSequenceClassification(config=lowercase__ )
__lowercase : int = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__lowercase : List[str] = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : str , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] ):
__lowercase : List[str] = self.num_choices
__lowercase : Union[str, Any] = TFConvBertForMultipleChoice(config=lowercase__ )
__lowercase : List[Any] = tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.num_choices, 1) )
__lowercase : Dict = tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.num_choices, 1) )
__lowercase : Optional[int] = tf.tile(tf.expand_dims(lowercase__ , 1 ) , (1, self.num_choices, 1) )
__lowercase : List[Any] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
__lowercase : Union[str, Any] = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case ( self : Dict , lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[Any] , lowercase__ : List[str] , lowercase__ : List[Any] , lowercase__ : Optional[Any] ):
__lowercase : Dict = self.num_labels
__lowercase : Optional[int] = TFConvBertForTokenClassification(config=lowercase__ )
__lowercase : List[str] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__lowercase : Tuple = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self : Any , lowercase__ : List[str] , lowercase__ : str , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : int ):
__lowercase : Tuple = TFConvBertForQuestionAnswering(config=lowercase__ )
__lowercase : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
__lowercase : Dict = model(lowercase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self : Tuple ):
__lowercase : Optional[int] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__lowercase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCAmelCase : Any = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
def snake_case ( self : Tuple ):
__lowercase : List[str] = TFConvBertModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
def snake_case ( self : str ):
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def snake_case ( self : Tuple ):
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def snake_case ( self : Any ):
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
def snake_case ( self : List[Any] ):
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def snake_case ( self : Optional[int] ):
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def snake_case ( self : str ):
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@slow
def snake_case ( self : Dict ):
__lowercase ,__lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : List[Any] = True
__lowercase : Dict = True
if hasattr(lowercase__ , "use_cache" ):
__lowercase : int = True
__lowercase : Tuple = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
__lowercase : Union[str, Any] = getattr(self.model_tester , "key_length" , lowercase__ )
for model_class in self.all_model_classes:
__lowercase : Tuple = self._prepare_for_class(lowercase__ , lowercase__ )
__lowercase : int = model_class(lowercase__ )
__lowercase : Optional[Any] = len(model(lowercase__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase__ , saved_model=lowercase__ )
__lowercase : str = os.path.join(lowercase__ , "saved_model" , "1" )
__lowercase : str = tf.keras.models.load_model(lowercase__ )
__lowercase : Optional[int] = model(lowercase__ )
if self.is_encoder_decoder:
__lowercase : Optional[Any] = outputs["encoder_hidden_states"]
__lowercase : List[Any] = outputs["encoder_attentions"]
else:
__lowercase : Union[str, Any] = outputs["hidden_states"]
__lowercase : List[Any] = outputs["attentions"]
self.assertEqual(len(lowercase__ ) , lowercase__ )
__lowercase : Tuple = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowercase__ ) , lowercase__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def snake_case ( self : List[Any] ):
__lowercase : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(lowercase__ )
def snake_case ( self : Tuple ):
__lowercase ,__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : int = True
__lowercase : Optional[Any] = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
__lowercase : List[str] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
__lowercase : int = getattr(self.model_tester , "key_length" , lowercase__ )
__lowercase : List[str] = getattr(self.model_tester , "key_length" , lowercase__ )
def check_decoder_attentions_output(lowercase__ : int ):
__lowercase : int = len(lowercase__ )
self.assertEqual(out_len % 2 , 0 )
__lowercase : List[Any] = outputs.decoder_attentions
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowercase__ : Union[str, Any] ):
__lowercase : Tuple = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase : List[str] = True
__lowercase : Any = False
__lowercase : str = model_class(lowercase__ )
__lowercase : Any = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
__lowercase : Optional[Any] = len(lowercase__ )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
if self.is_encoder_decoder:
__lowercase : List[Any] = model_class(lowercase__ )
__lowercase : str = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_decoder_attentions_output(lowercase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase : Optional[int] = True
__lowercase : List[str] = model_class(lowercase__ )
__lowercase : Dict = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
# Check attention is always last and order is fine
__lowercase : List[Any] = True
__lowercase : Union[str, Any] = True
__lowercase : Any = model_class(lowercase__ )
__lowercase : str = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
self.assertEqual(model.config.output_hidden_states , lowercase__ )
check_encoder_attentions_output(lowercase__ )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self : Union[str, Any] ):
__lowercase : List[Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
__lowercase : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase : str = model(lowercase__ )[0]
__lowercase : Dict = [1, 6, 7_6_8]
self.assertEqual(output.shape , lowercase__ )
__lowercase : Union[str, Any] = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1e-4 )
| 575
|
"""simple docstring"""
def snake_case__ ( _lowerCamelCase ) ->str:
"""simple docstring"""
    if isinstance(num, float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num, str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
if num == 0:
return "0b0"
__lowercase : int = False
if num < 0:
__lowercase : List[Any] = True
__lowercase : List[Any] = -num
__lowercase : list[int] = []
while num > 0:
binary.insert(0, num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCamelCase ) for e in binary )
return "0b" + "".join(str(_lowerCamelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 575
| 1
|
from __future__ import annotations
from typing import Any
class _A ( Exception ):
pass
class _A :
def __init__(self , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
UpperCamelCase__ = data
UpperCamelCase__ = None
    def __iter__(self ) -> Any:
'''simple docstring'''
UpperCamelCase__ = self
UpperCamelCase__ = []
while node:
if node in visited:
raise ContainsLoopError
            visited.append(node )
yield node.data
UpperCamelCase__ = node.next_node
@property
def _a (self ) -> bool:
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop) # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop) # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop) # False

    root_node = Node(1)
    print(root_node.has_loop) # False
| 707
|
from __future__ import annotations
def __UpperCamelCase ( A , A ):
UpperCamelCase__ = get_failure_array(A )
# 2) Step through text searching for pattern
UpperCamelCase__ , UpperCamelCase__ = 0, 0 # index into text, pattern
while i < len(A ):
if pattern[j] == text[i]:
if j == (len(A ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
UpperCamelCase__ = failure[j - 1]
continue
i += 1
return False
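# The failure array stores, for each prefix of the pattern, the length of the
# longest proper prefix that is also a suffix; on a mismatch the search resumes
# from that length instead of re-scanning the text.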
def __UpperCamelCase ( A ):
UpperCamelCase__ = [0]
UpperCamelCase__ = 0
UpperCamelCase__ = 1
while j < len(A ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
UpperCamelCase__ = failure[i - 1]
continue
j += 1
failure.append(A )
return failure
if __name__ == "__main__":
    # Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert kmp(pattern, text)
    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert kmp(pattern, text)
    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert kmp(pattern, text)
    # Test 5)
    pattern = '''aabaabaaa'''
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 469
| 0
|
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : Dict = len(SCREAMING_SNAKE_CASE )
# We need to create solution object to save path.
A_ : Union[str, Any] = [[0 for _ in range(SCREAMING_SNAKE_CASE )] for _ in range(SCREAMING_SNAKE_CASE )]
A_ : str = run_maze(SCREAMING_SNAKE_CASE , 0 , 0 , SCREAMING_SNAKE_CASE )
if solved:
print('''\n'''.join(str(SCREAMING_SNAKE_CASE ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : Optional[Any] = len(SCREAMING_SNAKE_CASE )
# Final check point.
if i == j == (size - 1):
A_ : List[str] = 1
return True
A_ : Tuple = (not i < 0) and (not j < 0) # Check lower bounds
A_ : List[str] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
A_ : str = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
A_ : Optional[Any] = 1
# check for directions
if (
run_maze(SCREAMING_SNAKE_CASE , i + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or run_maze(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , j + 1 , SCREAMING_SNAKE_CASE )
or run_maze(SCREAMING_SNAKE_CASE , i - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or run_maze(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , j - 1 , SCREAMING_SNAKE_CASE )
):
return True
A_ : Any = 0
return False
return False
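# Illustrative call (0 marks a free cell, 1 marks a wall; the name solve_maze is
# assumed for the first function above, which is obfuscated here):
#   solve_maze([[0, 1, 0], [0, 1, 0], [0, 0, 0]])  # prints a path matrix, returns True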
if __name__ == "__main__":
import doctest
doctest.testmod()
| 590
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , '''snapshots''' ) )]
A_ : Tuple = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ , A_ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=_SCREAMING_SNAKE_CASE )
A_ : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : Union[str, Any] = jax.random.PRNGKey(0 )
A_ : Dict = 4
A_ : str = jax.device_count()
A_ : Union[str, Any] = num_samples * [prompt]
A_ : Optional[int] = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
A_ : List[Any] = replicate(_SCREAMING_SNAKE_CASE )
A_ : str = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Any = shard(_SCREAMING_SNAKE_CASE )
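        # replicate() copies the pipeline parameters onto every local device and
        # shard() adds a leading device axis to the batch, so the jitted call
        # below runs data-parallel across all devices via pmap.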
A_ : List[str] = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A_ : Optional[int] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
def _snake_case ( self )->Any:
'''simple docstring'''
A_ , A_ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=_SCREAMING_SNAKE_CASE )
A_ : str = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : Dict = jax.random.PRNGKey(0 )
A_ : str = 50
A_ : Tuple = jax.device_count()
A_ : List[str] = num_samples * [prompt]
A_ : Optional[int] = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
A_ : List[str] = replicate(_SCREAMING_SNAKE_CASE )
A_ : Optional[int] = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Any = shard(_SCREAMING_SNAKE_CASE )
A_ : Any = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ , A_ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE )
A_ : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : int = jax.random.PRNGKey(0 )
A_ : str = 50
A_ : List[Any] = jax.device_count()
A_ : List[Any] = num_samples * [prompt]
A_ : Tuple = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
A_ : Dict = replicate(_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : int = shard(_SCREAMING_SNAKE_CASE )
A_ : str = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ , A_ : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
A_ : Tuple = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : Tuple = jax.random.PRNGKey(0 )
A_ : Dict = 50
A_ : Optional[Any] = jax.device_count()
A_ : Tuple = num_samples * [prompt]
A_ : Union[str, Any] = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
A_ : Optional[Any] = replicate(_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Any = shard(_SCREAMING_SNAKE_CASE )
A_ : int = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Dict = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , )
A_ , A_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
A_ : int = scheduler.create_state()
A_ : Optional[int] = scheduler_state
A_ : Dict = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : List[str] = jax.random.PRNGKey(0 )
A_ : Optional[int] = 50
A_ : Any = jax.device_count()
A_ : Any = num_samples * [prompt]
A_ : Optional[Any] = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
A_ : Union[str, Any] = replicate(_SCREAMING_SNAKE_CASE )
A_ : Tuple = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A_ : Dict = shard(_SCREAMING_SNAKE_CASE )
A_ : Tuple = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : Tuple = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
A_ : List[str] = jax.device_count()
A_ : Any = num_samples * [prompt]
A_ : Optional[int] = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE )
A_ , A_ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , )
A_ : Any = replicate(_SCREAMING_SNAKE_CASE )
A_ : Tuple = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
A_ : Dict = shard(_SCREAMING_SNAKE_CASE )
A_ : int = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A_ : str = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A_ , A_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , )
A_ : Union[str, Any] = replicate(_SCREAMING_SNAKE_CASE )
A_ : Tuple = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
A_ : str = shard(_SCREAMING_SNAKE_CASE )
A_ : Any = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
A_ : List[str] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 590
| 1
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        # Accept either an (image, question) pair or a pre-built dict/list of dicts.
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 720
|
'''simple docstring'''
import os
def solution():
    """Find the greatest product of four adjacent numbers in the 20x20 grid,
    scanning right, down, and both diagonals."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
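

# An equivalent, more compact formulation (a sketch, not part of the original
# solution): walk each of the four directions from every starting cell instead
# of writing four separate loop nests.
def max_product_directional(grid, run=4):
    n = len(grid)
    best = 0
    for dx, dy in ((0, 1), (1, 0), (1, 1), (1, -1)):  # right, down, two diagonals
        for i in range(n):
            for j in range(n):
                # Skip starting cells whose run would leave the grid.
                if not (0 <= i + dx * (run - 1) < n and 0 <= j + dy * (run - 1) < n):
                    continue
                product = 1
                for k in range(run):
                    product *= grid[i + dx * k][j + dy * k]
                best = max(best, product)
    return best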
| 490
| 0
|
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[4_7, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[4_7, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[4_7, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[1_3, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[3_7, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[2_7, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[1_6, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(1_3,), (1_6,), (2_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(1_3,), (1_6,), (3_7,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[4_7, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
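
# The shape relationship the encode test above asserts, in isolation (a sketch;
# running it downloads the SD v1-4 VAE, which downsamples 8x spatially and uses
# 4 latent channels):
#
#     vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
#     image = torch.randn(1, 3, 512, 512)
#     latents = vae.encode(image).latent_dist.sample()
#     assert latents.shape == (1, 4, 64, 64)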
| 83
|
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative distance (step / max_step) after which the complex
    number constituted by this x-y pair diverges."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black if the point belongs to the Mandelbrot set, white otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; the hue encodes the escape distance outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
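
# For reference, the same escape-time iteration in complex form (a sketch):
#
#     def escape_time(c: complex, max_step: int) -> float:
#         z = 0j
#         for step in range(max_step):
#             z = z * z + c
#             if abs(z) > 2:  # |z| > 2 (i.e. |z|^2 > 4) guarantees divergence
#                 break
#         return step / (max_step - 1)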
| 83
| 1
|
'''simple docstring'''
def get_demo_graph(index):
    """Return one of four sample undirected graphs (adjacency lists), selected by index."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the bridges of an undirected graph given as an adjacency list."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
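
# Usage sketch: bridges of the first demo graph. The triangle {0, 1, 2} and the
# 4-cycle {5, 6, 7, 8} contain no bridges; the three remaining edges do.
#
#     sorted(compute_bridges(get_demo_graph(0)))  # -> [(2, 3), (2, 5), (3, 4)]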
| 721
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier):
    """Split a camel-cased name into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
    """Generate a dataframe with model names and the frameworks they support."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the table mapping model classes to (pipeline tag, auto class) pairs."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    """Update the metadata for the Transformers repo."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token)
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))
        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata", folder_path=tmp_dir, repo_type="dataset",
            token=token, commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check all pipeline tags are registered in PIPELINE_TAGS_AND_AUTO_MODELS."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
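
# Invocation sketch (token and sha are placeholders):
#
#     python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
#     python utils/update_metadata.py --check-only
#
# For reference, the splitter above behaves as follows:
#
#     camel_case_split("TFBertModel")  # -> ['TF', 'Bert', 'Model']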
| 654
| 0
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """Instantiate an OwlViTConfig from owlvit text and vision configuration dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
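
# Usage sketch (values are illustrative): compose a full OwlViTConfig from the
# two sub-config dicts, mirroring `from_text_vision_configs` above.
#
#     text_config = OwlViTTextConfig(hidden_size=512).to_dict()
#     vision_config = OwlViTVisionConfig(image_size=768, patch_size=32).to_dict()
#     config = OwlViTConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.text_config.hidden_size == 512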
| 47
|
from typing import Any
def viterbi(observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities):
    _validation(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities):
    _validate_not_empty(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def _validate_lists(observations_space, states_space):
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object, var_name):
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)
def _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities):
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object, var_name):
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object, var_name, value_type, nested=False):
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
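
# Usage sketch with the classic health/fever HMM (numbers are illustrative):
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     start_p = {"Healthy": 0.6, "Fever": 0.4}
#     trans_p = {
#         "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#         "Fever": {"Healthy": 0.4, "Fever": 0.6},
#     }
#     emit_p = {
#         "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     viterbi(observations, states, start_p, trans_p, emit_p)
#     # -> ['Healthy', 'Healthy', 'Fever']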
| 285
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 698
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
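
# Invocation sketch (the config path is a placeholder):
#
#     accelerate test
#     accelerate test --config_file path/to/default_config.yaml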
| 698
| 1
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCamelCase_ = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 28
|
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit of `number`."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
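
# Example: 19 is 0b10011, so its highest set bit sits at position 5; zero has
# no set bits.
#
#     get_highest_set_bit_position(19)  # -> 5
#     get_highest_set_bit_position(0)   # -> 0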
| 28
| 1
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
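

# --- Illustrative sketch (not executed by this test file) ----------------------
# The user-facing pattern the tests above exercise: wrap each step in
# `accelerator.accumulate(model)` so gradient synchronization only happens on
# accumulation boundaries. The model/optimizer/dataloader arguments are
# placeholders, not objects defined in this file.
def _user_gradient_accumulation_sketch(model, optimizer, dataloader):
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        with accelerator.accumulate(model):
            output = model(batch["x"])
            loss = F.mse_loss(output, batch["y"])
            accelerator.backward(loss)  # internally scales by the accumulation steps
            optimizer.step()
            optimizer.zero_grad()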
| 450
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
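
# Illustration (assumes a transformers install that ships this model): with the
# `_LazyModule` indirection above, importing the package is cheap, and the
# torch-backed classes are only materialized on first attribute access, e.g.
#
#     from transformers.models.nllb_moe import NllbMoeConfig   # config only
#     from transformers.models.nllb_moe import NllbMoeModel    # triggers torch import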
| 450
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 569
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 569
| 1
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__(self, **kwargs):
        # Remap deprecated `no_*` flags onto their positive counterparts.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 704
|
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """
    Return the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings.

    >>> p_series(5, 2)
    ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
    >>> p_series("", 1000)
    ['']
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
| 322
| 0
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params
        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)
        params = replicate(params)
        prompt_ids = shard(prompt_ids)
        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
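

# --- Standalone sketch of the replicate/shard pattern used above ---------------
# `replicate` copies parameters to every device, `shard` splits inputs along a
# leading device axis, and a pmap-compiled call (what `jit=True` does inside the
# pipeline) runs one shard per device. Pure-jax demo, no diffusers required:
def _pmap_pattern_demo():
    n = jax.device_count()
    xs = jnp.arange(n * 4, dtype=jnp.float32).reshape(n, 4)  # leading axis == devices
    return jax.pmap(lambda x: x * 2.0)(xs)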
| 651
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
__UpperCAmelCase = """patrickvonplaten/t5-tiny-random"""
__UpperCAmelCase = """sshleifer/bart-tiny-random"""
__UpperCAmelCase = """sshleifer/tiny-mbart"""
__UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # test one model to quickly catch simple problems; extensive multi-model
    # coverage is done in the @slow variants below
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra models should go into the list here - can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [" num_beams | length_penalty", model, "Best score args"]
        un_expected_strings = ["Info"]
        if "translation" in task:
            expected_strings.append("bleu")
        else:
            expected_strings.extend(ROUGE_KEYS)
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name).exists()
        os.remove(Path(output_file_name))
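

# For reference (paths and the model id are placeholders): the search test above
# is the in-process equivalent of a CLI call along the lines of
#
#     python run_eval_search.py sshleifer/tiny-mbart input.source output.txt \
#         --score_path scores.json --reference_path val.target --task summarization \
#         --search "num_beams=1:2 length_penalty=0.9:1.0"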
| 651
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class BilinearShortestEdgeImageProcessor(BaseImageProcessor):
    # NOTE: the class name here is an assumption -- the concrete model name was
    # stripped from this snippet; the body follows the standard transformers
    # image-processor layout.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
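

# Illustrative usage (the processor class name above is a reconstruction; any
# PIL or numpy image works). `__call__` is inherited from BaseImageProcessor and
# dispatches to `preprocess`:
#
#     processor = BilinearShortestEdgeImageProcessor()
#     batch = processor(images=image, return_tensors="np")
#     batch["pixel_values"].shape  # -> (1, 3, 224, 224) after resize + center crop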
| 155
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
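

# Standalone sketch (not run by the script above): `split_dataset_by_node` gives
# each rank a near-equal slice of a map-style dataset; streaming datasets are
# instead split by shard.
def _split_demo():
    from datasets import Dataset
    from datasets.distributed import split_dataset_by_node

    ds = Dataset.from_dict({"i": list(range(10))})
    return len(split_dataset_by_node(ds, rank=0, world_size=2))  # -> 5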
| 155
| 1
|
def sum_of_divisors(input_num: int) -> int:
    """
    Return the sum of the proper divisors of a positive integer.

    >>> sum_of_divisors(6)
    6
    >>> sum_of_divisors(12)
    16
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
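

# Quick check built on the reconstructed helper above (its name is an
# assumption): a perfect number equals the sum of its proper divisors.
def _is_perfect(n: int) -> bool:
    return sum_of_divisors(n) == n  # _is_perfect(6) is True, _is_perfect(7) is False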
| 501
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__lowerCamelCase : List[str] = """<<<<<<< This should probably be modified because it mentions: """
__lowerCamelCase : Optional[int] = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def convert_command_factory(args: Namespace):
    """Factory function used to instantiate the ConvertCommand from CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]
        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)
            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue
            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)
            if needs_manual_update:
                with_manual_update.append(output_file)
            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 501
| 1
|
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCamelCase__ : Tuple = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
lowerCamelCase__ : Optional[Any] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
lowerCamelCase__ : Optional[int] = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
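

# Minimal usage sketch (assumes `rouge_score` is installed; `load_metric` is the
# legacy datasets API this metric script targets):
def _rouge_demo():
    rouge = datasets.load_metric("rouge")
    results = rouge.compute(predictions=["hello there"], references=["hello there"])
    return results["rouge1"].mid.fmeasure  # 1.0 for an exact match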
| 706
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18
| 0
|
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
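

# Worked example for `shift_tokens_right` above: [[5, 6, 7]] with
# decoder_start_token_id=0 and pad_token_id=1 becomes [[0, 5, 6]]; any -100
# label positions would be replaced by the pad id.
def _shift_demo():
    ids = jnp.array([[5, 6, 7]])
    return shift_tokens_right(ids, pad_token_id=1, decoder_start_token_id=0)  # [[0, 5, 6]]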
| 319
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
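# Illustrative only (not part of the original file): the suite above is collected
# by pytest/unittest; assuming the usual transformers repository layout, the fast
# tests run with something like
#     pytest tests/models/dpt/test_modeling_dpt_hybrid.py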
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
                return partition_candidate
        integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
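# Illustrative check (not part of the original file): for positive_integer = 2,
# log2(sqrt(4 * 2 + 1) / 2 + 1 / 2) = log2(2) = 1 is a whole number, so
# check_partition_perfect(2) is True.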
'''simple docstring'''
import base64


def ascii85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Ascii85."""
    return base64.a85encode(string.encode("utf-8"))


def ascii85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
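# Illustrative round-trip (not part of the original file):
#     encoded = ascii85_encode("some text")
#     assert ascii85_decode(encoded) == "some text"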
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
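# Illustrative only (not part of the original file), assuming pad_token_id = 0:
#     trim_batch(torch.tensor([[5, 3, 0], [7, 2, 0]]), 0)
# drops the all-padding third column and returns tensor([[5, 3], [7, 2]]).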
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
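# Illustrative use of the metric helpers above (not part of the original file):
#     calculate_exact_match(["Paris", "London"], ["paris!", "Berlin"])
# normalize_answer lowercases and strips punctuation, so only the first pair
# matches and the result is {"em": 0.5}.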
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
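# Illustrative only (not part of the original file): the tester can be probed
# outside the unittest runner, e.g.
#     tester = FlaxRobertaModelTester(None)
#     config, input_ids, token_type_ids, attention_mask = tester.prepare_config_and_inputs()
#     assert config.vocab_size == 99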
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
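# Illustrative usage (not part of the original file): the defaults above
# reproduce the Megatron-BERT-345M geometry.
#     config = MegatronBertConfig()
#     assert config.hidden_size == 1024 and config.num_hidden_layers == 24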
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
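# Illustrative only (not part of the original file): in the transformers test
# suite, @slow tests such as the one above are skipped unless RUN_SLOW=1 is set:
#     RUN_SLOW=1 pytest tests/models/swinv2/test_modeling_swinv2.py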
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
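# Illustrative check (not part of the original file): 6 == 0b110, so its most
# significant set bit is the third one.
#     assert get_highest_set_bit_position(6) == 3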
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
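# Illustrative only (not part of the original file): a minimal usable config just
# needs a prediction length; everything else falls back to the defaults above.
#     config = TimeSeriesTransformerConfig(prediction_length=24)
#     assert config.context_length == 24  # context_length defaults to prediction_length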
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})


class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
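# Illustrative usage sketch (not part of the original file; paths and tokenizer
# are assumptions):
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad_data")
#     dataset = SquadDataset(args, tokenizer, mode="dev")
# where `tokenizer` is any PreTrainedTokenizer; features are cached next to the data.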
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name for a given date, using the Doomsday algorithm."""
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
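# Illustrative check (not part of the original file): 24 October 2020 fell on a
# Saturday.
#     assert get_week_day(2020, 10, 24) == "Saturday"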
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
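# Illustrative only (not part of the original file): once uploaded under the id
# mentioned above, the tiny checkpoint loads like any other FSMT model:
#     model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")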
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)
class lowercase(_lowercase ):
@staticmethod
def lowercase__ ( __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
a__ = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__SCREAMING_SNAKE_CASE , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__SCREAMING_SNAKE_CASE , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__SCREAMING_SNAKE_CASE , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__SCREAMING_SNAKE_CASE , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__SCREAMING_SNAKE_CASE , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__SCREAMING_SNAKE_CASE , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__SCREAMING_SNAKE_CASE , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__SCREAMING_SNAKE_CASE , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__SCREAMING_SNAKE_CASE , default=3_2 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__SCREAMING_SNAKE_CASE , default=6_4 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__SCREAMING_SNAKE_CASE , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__SCREAMING_SNAKE_CASE , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
a__ = logging.get_logger('transformers-cli/training' )
a__ = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__SCREAMING_SNAKE_CASE )
a__ = args.output
a__ = args.column_label
a__ = args.column_text
a__ = args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
a__ = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
a__ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
a__ = None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
a__ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
a__ = args.validation_split
a__ = args.train_batch_size
a__ = args.valid_batch_size
a__ = args.learning_rate
a__ = args.adam_epsilon
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def lowercase__ ( self ) -> Any:
"""simple docstring"""
raise NotImplementedError
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
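# Example wiring (an added sketch, not part of the original module): roughly
# how a root `transformers-cli` parser would register and dispatch the command
# defined above.
if __name__ == "__main__":
    cli_parser = ArgumentParser('transformers-cli')
    commands_parser = cli_parser.add_subparsers(help='transformers-cli command helpers')
    TrainCommand.register_subcommand(commands_parser)
    cli_args = cli_parser.parse_args()
    if hasattr(cli_args, 'func'):
        cli_args.func(cli_args).run()
    else:
        cli_parser.print_help()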
| 273
| 0
|
'''Tests for the Flax BigBird model classes.'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
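# Added convenience entry point (a sketch, not part of the original test file):
# lets the module be run directly instead of through pytest.
if __name__ == "__main__":
    unittest.main()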
| 708
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
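# Usage sketch (an added illustration with a hypothetical dataset name, not
# part of the original module): resolving dummy-data paths touches neither
# the network nor the disk.
if __name__ == "__main__":
    manager = MockDownloadManager("squad", None, "1.0.0", use_local_dummy_data=True)
    print(manager.dummy_data_folder)  # dummy/1.0.0
    print(manager.local_path_to_dummy_data)  # datasets/squad/dummy/1.0.0/dummy_data.zip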
| 206
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'caidas/swin2sr-classicalsr-x2-64': (
        'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
    ),
}


class Swin2SRConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Swin2SR model."""

    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
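# Minimal usage sketch (added, not part of the original module): instantiate
# the default configuration and read an attribute through `attribute_map`.
if __name__ == "__main__":
    config = Swin2SRConfig()
    print(config.model_type, config.num_layers, config.hidden_size)  # swin2sr 6 180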
| 408
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')

FRAMEWORK = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 408
| 1
|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 375
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """
        Serializes this instance to a Python dictionary. The large mel filter banks are
        dropped because they can be rebuilt from the other arguments.
        """
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
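# Minimal usage sketch (added, not part of the original module): run the
# extractor on one second of synthetic noise; "repeatpad" and "fusion" are the
# defaults configured above, so the output carries four stacked mel views.
if __name__ == "__main__":
    feature_extractor = ClapFeatureExtractor()
    noise = np.random.randn(feature_extractor.sampling_rate).astype(np.float64)
    features = feature_extractor(noise, sampling_rate=feature_extractor.sampling_rate, return_tensors="np")
    print(features["input_features"].shape)  # (1, 4, num_frames, 64)
    print(features["is_longer"])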
| 375
| 1
|
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big endian to little endian (byte-wise)."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as the hex digest of its lowest 4 bytes, little endian."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pre-process the message: append a 1 bit, pad with 0 bits to 448 mod 512, then append the 64-bit length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator:
    """Split the padded bit string into 512-bit blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by `shift` places."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """
    Return the 32-character hex MD5 digest of `message` as bytes.

    >>> md5_me(b"")
    b'd41d8cd98f00b204e9800998ecf8427e'
    >>> md5_me(b"The quick brown fox jumps over the lazy dog")
    b'9e107d9d372bb6826bd81d3542a419d6'
    """
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xefcdab89
    c0 = 0x98badcfe
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
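    # Added cross-check (a sketch, not part of the original module): verify one
    # input against Python's hashlib reference implementation. Note that the
    # functions above take bytes and return a bytes hex digest.
    import hashlib

    sample = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(sample) == hashlib.md5(sample).hexdigest().encode("utf-8")
    print(md5_me(sample).decode("utf-8"))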
| 80
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
__UpperCamelCase : str = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
__UpperCamelCase : Optional[int] = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
__UpperCamelCase : Dict = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
__UpperCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__UpperCamelCase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__UpperCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__UpperCamelCase : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__UpperCamelCase : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__UpperCamelCase : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__UpperCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__UpperCamelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__UpperCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__UpperCamelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__UpperCamelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__UpperCamelCase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__UpperCamelCase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__UpperCamelCase : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :Tuple = FLAX_MODEL_MAPPING
__UpperCamelCase : Tuple = auto_class_update(FlaxAutoModel)
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :Union[str, Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
__UpperCamelCase : List[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :Dict = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
__UpperCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :List[Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
__UpperCamelCase : Dict = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :Optional[Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCamelCase : Optional[Any] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :Optional[Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :Optional[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
__UpperCamelCase : Union[str, Any] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :List[Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__UpperCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :Optional[int] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
__UpperCamelCase : int = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :str = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
__UpperCamelCase : int = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__UpperCamelCase : Optional[Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
__UpperCamelCase : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class __UpperCamelCase ( _BaseAutoModelClass ):
__snake_case :Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
__UpperCamelCase : str = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
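# --- Usage sketch (added; not part of the original mapping module). ---
# How these auto classes are typically used, assuming a Flax-enabled
# `transformers` install and Hub access; the checkpoint name is illustrative.
#
#   from transformers import AutoTokenizer, FlaxAutoModelForSequenceClassification
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
#   inputs = tokenizer("Auto classes dispatch on the config type.", return_tensors="np")
#   logits = model(**inputs).logits   # resolved to FlaxBertForSequenceClassification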
| 80
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class a_ ( PretrainedConfig ):
    model_type = '''gpt_neo'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=50257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
                F'`config.num_layers = {self.num_layers}`. '
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.' )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params( attention_types ):
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold( input , dimension , size , step ):
    # Custom torch.Tensor.unfold implementation to enable the export to ONNX.
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='floor' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks( seq_length , window_size ):
    # Custom implementation of GPTNeoAttentionMixin._get_block_length_and_num_blocks
    # to enable the export to ONNX (the original uses Python control flow).
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='floor' )
class a_ ( OnnxConfigWithPast ):
    @property
    def inputs( self ):
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_attention_heads( self ):
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ):
        return 13
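# --- Illustration (added): how `attention_types` expands into per-layer patterns. ---
# A standalone re-implementation of `expand_attention_types_params`, only to make
# the behaviour concrete; it assumes nothing beyond the list format shown above.
def expand_attention_types(attention_types ):
    attentions = []
    for pattern, repeats in attention_types:
        for _ in range(repeats ):
            attentions.extend(pattern )
    return attentions

# The GPT-Neo default [[["global", "local"], 12]] yields 24 alternating layers:
assert expand_attention_types([[["global", "local"], 12]] ) == ["global", "local"] * 12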
| 427
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester :
def __init__( self : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any=1_3 , __lowerCAmelCase : Any=3_0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Any=3_2 , __lowerCAmelCase : int=2 , __lowerCAmelCase : str=4 , __lowerCAmelCase : str=3_7 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=1_0 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : int=None , ):
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def lowercase__ ( self : Union[str, Any] ):
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Optional[int] ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def lowercase__ ( self : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ):
__snake_case = TFViTModel(config=__lowerCAmelCase )
__snake_case = model(__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
__snake_case = self.image_size // 2
__snake_case = pixel_values[:, :, :image_size, :image_size]
__snake_case = model(__lowerCAmelCase , interpolate_pos_encoding=__lowerCAmelCase , training=__lowerCAmelCase )
__snake_case = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any ):
__snake_case = self.type_sequence_label_size
__snake_case = TFViTForImageClassification(__lowerCAmelCase )
__snake_case = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
__snake_case = self.image_size // 2
__snake_case = pixel_values[:, :, :image_size, :image_size]
__snake_case = model(__lowerCAmelCase , interpolate_pos_encoding=__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case = 1
__snake_case = TFViTForImageClassification(__lowerCAmelCase )
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case = model(__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : List[str] ):
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def lowercase__ ( self : int ):
__snake_case = TFViTModelTester(self )
__snake_case = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=3_7 )
def lowercase__ ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def lowercase__ ( self : Tuple ):
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , tf.keras.layers.Layer ) )
def lowercase__ ( self : Any ):
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(__lowerCAmelCase )
__snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def lowercase__ ( self : Dict ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowercase__ ( self : Tuple ):
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def lowercase__ ( self : Dict ):
__snake_case = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase__ ( ):
__snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Union[str, Any] ):
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def lowercase__ ( self : Union[str, Any] ):
__snake_case = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=__lowerCAmelCase , return_tensors='tf' )
# forward pass
__snake_case = model(**__lowerCAmelCase )
# verify the logits
__snake_case = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
__snake_case = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 )
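# --- Illustration (added): the patch-sequence length used by the tester above. ---
# The transformer sees one embedding per non-overlapping patch plus the [CLS]
# token; a sanity check with the standard ViT-Base 224/16 setup:
image_size, patch_size = 224, 16
assert (image_size // patch_size) ** 2 + 1 == 197  # 14 * 14 patches + [CLS]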
| 427
| 1
|
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
    """simple docstring"""
    size: int = len(matrix )
    augmented: Matrix = [[0 for _ in range(size + 1 )] for _ in range(size )]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate(y_list: list[int] ) -> Callable[[int], int]:
    """simple docstring"""
    size: int = len(y_list )
    matrix: Matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector: Matrix = [[0] for _ in range(size )]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int
    for x_val, y_val in enumerate(y_list ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function(variable: int ) -> int:
    """simple docstring"""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    """simple docstring"""
    data_points: list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
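# --- Quick check (added): the cube example from the Project Euler 101 statement. ---
# Fitting a quadratic through the first three cubes and extrapolating yields the
# first incorrect term 58, i.e. OP(3, 4) for u(n) = n**3.
poly = interpolate([1, 8, 27] )
assert [poly(n ) for n in range(1 , 4 )] == [1, 8, 27]
assert poly(4 ) == 58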
| 60
|
import comet # From: unbabel-comet
import torch
import datasets
SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year\'s competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
                    """https://github.com/Unbabel/COMET""",
                    """https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
                    """http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf""",
                ] , )
    def _download_and_prepare( self , dl_manager ):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"""src""": sources, """mt""": predictions, """ref""": references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
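# --- Illustration (added): the columnar-to-row transformation used in _compute. ---
# `data` maps column names to equal-length lists; zipping the values and re-zipping
# with the keys yields one dict per example, the format the COMET scorer expects.
example = {"src": ["s1", "s2"], "mt": ["m1", "m2"], "ref": ["r1", "r2"]}
rows = [dict(zip(example , t ) ) for t in zip(*example.values() )]
assert rows == [
    {"src": "s1", "mt": "m1", "ref": "r1"},
    {"src": "s2", "mt": "m2", "ref": "r2"},
]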
| 99
| 0
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + """/user"""
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("""USER_TOKEN""", """""")
def fetch_github_info(auth_token ):
    """simple docstring"""
    headers = {
        "Authorization": F'token {auth_token}',
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"{key}: {value}")
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
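# --- Illustration (added): the request `fetch_github_info` performs. ---
# For a personal access token (the value below is a placeholder, not a real
# token), the helper sends an authenticated GET to the /user endpoint:
example_headers = {
    "Authorization": "token ghp_example_placeholder",
    "Accept": "application/vnd.github.v3+json",
}
# requests.get("https://api.github.com/user", headers=example_headers).json()  # network call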
| 394
|
'''simple docstring'''
def split(string , separator = " " ):
    """simple docstring"""
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
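# --- Quick checks (added) for the splitter above, mirroring its doctest usage. ---
assert split("apple#banana#cherry" , "#" ) == ["apple", "banana", "cherry"]
assert split("hello world" ) == ["hello", "world"]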
| 394
| 1
|
import logging
from transformers import PretrainedConfig
UpperCAmelCase_ = logging.getLogger(__name__)
UpperCAmelCase_ = {
"bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class __UpperCamelCase ( PretrainedConfig ):
    model_type = """bertabs"""
    def __init__( self , vocab_size=30522 , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
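# --- Usage sketch (added): instantiating the config defined above. ---
# The class keeps this dump's obfuscated name; upstream it is `BertAbsConfig`.
# Defaults mirror the remi/bertabs-finetuned-cnndm checkpoint referenced above.
config = __UpperCamelCase()
assert config.model_type == "bertabs"
assert (config.enc_layers, config.dec_layers) == (6, 6)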
| 32
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowercase ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax( self ):
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloat16 , )
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
        print(f'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_stable_diffusion_dpm_flax( self ):
        model_id = 'stabilityai/stable-diffusion-2'
        scheduler , scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id , subfolder='scheduler' )
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , revision='bf16' , dtype=jnp.bfloat16 , )
        params['scheduler'] = scheduler_params
        prompt = 'A painting of a squirrel eating a burger'
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
        print(f'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
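# --- Illustration (added): the replicate/shard data-parallel pattern used above. ---
# A minimal sketch, assuming jax and flax are installed; it exercises the same
# helpers as the tests without downloading any pipeline weights.
# rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())   # one RNG per device
# params = replicate({'w': jnp.ones(3)})                              # leaves gain a leading device axis
# batch = jnp.zeros((jax.device_count(), 77), dtype=jnp.int32)        # stand-in token ids
# sharded = shard(batch)                                              # -> (num_devices, 1, 77)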
| 490
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class lowerCAmelCase__ ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='utf-8' ) as merges_handle:
            bpe_merges = merges_handle.read().split('\n' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(' ' ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        text = ''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        with open(merge_file , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
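# --- Illustration (added): the byte-to-unicode table behind this BPE tokenizer. ---
# Printable bytes map to themselves; the rest are shifted into unused code points,
# which is why a space shows up as "Ġ" (and a newline as "Ċ") in GPT-2/LED vocabs.
byte_encoder = bytes_to_unicode()
assert byte_encoder[ord("A" )] == "A"
assert byte_encoder[ord(" " )] == "Ġ"
assert byte_encoder[ord("\n" )] == "Ċ"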
| 492
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , lowercase=1 / 255 , lowercase=True , lowercase=[0.5, 0.5, 0.5] , lowercase=[0.5, 0.5, 0.5] , lowercase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCamelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : int = num_channels
_lowerCamelCase : Union[str, Any] = min_resolution
_lowerCamelCase : List[Any] = max_resolution
_lowerCamelCase : Union[str, Any] = do_resize
_lowerCamelCase : str = size
_lowerCamelCase : List[str] = do_rescale
_lowerCamelCase : Optional[int] = rescale_factor
_lowerCamelCase : str = do_normalize
_lowerCamelCase : Optional[int] = image_mean
_lowerCamelCase : Tuple = image_std
_lowerCamelCase : Tuple = do_pad
def A_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def A_ ( self , lowercase , lowercase=False ):
if not batched:
_lowerCamelCase : Union[str, Any] = image_inputs[0]
if isinstance(lowercase , Image.Image ):
_lowerCamelCase, _lowerCamelCase : Optional[Any] = image.size
else:
_lowerCamelCase, _lowerCamelCase : List[str] = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase : List[Any] = int(self.size['shortest_edge'] * h / w )
_lowerCamelCase : Union[str, Any] = self.size['shortest_edge']
elif w > h:
_lowerCamelCase : List[str] = self.size['shortest_edge']
_lowerCamelCase : Optional[Any] = int(self.size['shortest_edge'] * w / h )
else:
_lowerCamelCase : int = self.size['shortest_edge']
_lowerCamelCase : Optional[Any] = self.size['shortest_edge']
else:
_lowerCamelCase : Union[str, Any] = []
for image in image_inputs:
_lowerCamelCase, _lowerCamelCase : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase : Tuple = max(lowercase , key=lambda lowercase : item[0] )[0]
_lowerCamelCase : Any = max(lowercase , key=lambda lowercase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase__ ( ImageProcessingSavingTestMixin, unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = DetrImageProcessor if is_vision_available() else None
def A_ ( self ):
_lowerCamelCase : int = DetrImageProcessingTester(self )
@property
def A_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self ):
_lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , 'image_mean' ) )
self.assertTrue(hasattr(lowercase , 'image_std' ) )
self.assertTrue(hasattr(lowercase , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase , 'do_rescale' ) )
self.assertTrue(hasattr(lowercase , 'rescale_factor' ) )
self.assertTrue(hasattr(lowercase , 'do_resize' ) )
self.assertTrue(hasattr(lowercase , 'size' ) )
self.assertTrue(hasattr(lowercase , 'do_pad' ) )
def A_ ( self ):
_lowerCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , lowercase )
_lowerCamelCase : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , lowercase )
def A_ ( self ):
pass
def A_ ( self ):
# Initialize image_processing
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCamelCase, _lowerCamelCase : List[Any] = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase, _lowerCamelCase : str = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
_lowerCamelCase : Union[str, Any] = image_processing(lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self ):
# Initialize image_processing
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
_lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCamelCase, _lowerCamelCase : List[Any] = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase : str = image_processing(lowercase , return_tensors='pt' ).pixel_values
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self ):
# Initialize image_processing
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
_lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase : Dict = image_processing(lowercase , return_tensors='pt' ).pixel_values
_lowerCamelCase, _lowerCamelCase : List[str] = self.image_processor_tester.get_expected_values(lowercase , batched=lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A_ ( self ):
# prepare image and target
_lowerCamelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_lowerCamelCase : int = json.loads(f.read() )
_lowerCamelCase : str = {'image_id': 39769, 'annotations': target}
# encode them
_lowerCamelCase : str = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
_lowerCamelCase : int = image_processing(images=lowercase , annotations=lowercase , return_tensors='pt' )
# verify pixel values
_lowerCamelCase : Optional[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , lowercase )
_lowerCamelCase : List[str] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
_lowerCamelCase : Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) )
# verify boxes
_lowerCamelCase : int = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase )
_lowerCamelCase : Tuple = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) )
# verify image_id
_lowerCamelCase : List[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) )
# verify is_crowd
_lowerCamelCase : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) )
# verify class_labels
_lowerCamelCase : Dict = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) )
# verify orig_size
_lowerCamelCase : Optional[int] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) )
# verify size
_lowerCamelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) )
@slow
def A_ ( self ):
# prepare image, target and masks_path
_lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_lowerCamelCase : List[Any] = json.loads(f.read() )
_lowerCamelCase : str = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
_lowerCamelCase : List[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_lowerCamelCase : List[Any] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
_lowerCamelCase : Dict = image_processing(images=lowercase , annotations=lowercase , masks_path=lowercase , return_tensors='pt' )
# verify pixel values
_lowerCamelCase : Optional[int] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , lowercase )
_lowerCamelCase : List[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase , atol=1E-4 ) )
# verify area
_lowerCamelCase : Optional[Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase ) )
# verify boxes
_lowerCamelCase : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase )
_lowerCamelCase : Union[str, Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase , atol=1E-3 ) )
# verify image_id
_lowerCamelCase : List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase ) )
# verify is_crowd
_lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase ) )
# verify class_labels
_lowerCamelCase : List[str] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase ) )
# verify masks
_lowerCamelCase : List[str] = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase )
# verify orig_size
_lowerCamelCase : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase ) )
# verify size
_lowerCamelCase : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase ) )
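# --- Illustration (added): the aspect-ratio-preserving resize the tester checks. ---
# A minimal standalone version of `get_expected_values` for the unbatched case,
# ignoring the `longest_edge` cap handled by the real processor.
def expected_size(height , width , shortest_edge=18 ):
    if width < height:
        return int(shortest_edge * height / width ), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height )
    return shortest_edge, shortest_edge

assert expected_size(40 , 20 ) == (36, 18)
assert expected_size(20 , 40 ) == (18, 36)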
| 492
| 1
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task ,reset_position_index_per_cell ,tf_checkpoint_path ,tapas_config_file ,pytorch_dump_path ):
    """simple docstring"""
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_4694
        config.cell_selection_preference = 0.20_7951
        config.huber_loss_delta = 0.12_1194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.035_2513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.90_3421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_3141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F"""Task {task} not supported.""" )
    print(F"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model ,config ,tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F"""Save tokenizer files to {pytorch_dump_path}""" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' ,model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print('Used relative position embeddings:' ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative (rather than absolute) position embeddings. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
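# A minimal invocation sketch (all paths are illustrative placeholders; the flag names come from
# the argument parser above):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model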
| 220
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
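# A minimal usage sketch (the ClassLabel names are illustrative assumptions):
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   aligned = task.align_with_features(features)  # label_schema now carries the concrete ClassLabel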
| 220
| 1
|
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Return the ``nth`` prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F"""{solution() = }""")
| 143
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository"""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
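# A minimal usage sketch (the repo contents are illustrative assumptions; `dataset_info`
# would be a huggingface_hub DatasetInfo object):
#   fs = HfFileSystem(repo_info=dataset_info, token=None)
#   fs.ls("")                            # file/directory names at the repo root
#   with fs.open("data/train.csv") as f:  # resolved to a hf_hub_url and streamed
#       first_line = f.readline()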
| 143
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
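# How the lazy module behaves at runtime (a sketch): nothing heavy is imported until an
# attribute is first accessed, e.g.
#   from transformers.models.vivit import VivitConfig  # only triggers the configuration import
#   # VivitModel (and hence torch) is only imported when it is actually touched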
| 373
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 373
| 1
|
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
UpperCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
UpperCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
"""simple docstring"""
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute(self, predictions, references, min_len=1, max_len=4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
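# Equivalent direct NLTK call, bypassing the metric wrapper (a sketch; `hypotheses` and
# `list_of_references` are tokenized lists exactly as in the docstring examples above):
#   from nltk.translate import gleu_score
#   score = gleu_score.corpus_gleu(
#       list_of_references=list_of_references, hypotheses=hypotheses, min_len=1, max_len=4
#   )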
| 562
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        # Note: the Flax classes live in the Flax modeling file, not the TF one
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 562
| 1
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
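# A minimal configuration sketch (the hyperparameter values are illustrative assumptions):
#   config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   config.feature_size  # input_size * len(lags_sequence) + _number_of_features, see above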
| 21
|
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
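# A minimal invocation sketch (script name and paths are illustrative placeholders):
#   python convert_ldm_uncond.py \
#       --checkpoint_path /path/to/model.ckpt \
#       --config_path /path/to/config.yaml \
#       --output_path /path/to/output_pipeline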
| 231
| 0
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
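# Quick sanity checks (expected results computed by hand):
#   prime_factors(12)  -> [2, 2, 3]
#   prime_factors(97)  -> [97]  (97 is prime)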
| 146
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True

    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
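# An equivalent vectorized check (a sketch; reuses `expected` and `output` from above):
#   expected_tensor = torch.tensor(expected, dtype=output.dtype, device=output.device).view(3, 3)
#   assert torch.allclose(output[0, :3, :3], expected_tensor, rtol=TOLERANCE, atol=TOLERANCE)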
| 146
| 1
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
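# A minimal invocation sketch for this release helper:
#   python utils/release.py                 # pre-release: bump the version and update examples
#   python utils/release.py --patch         # pre-release for a patch version
#   python utils/release.py --post_release  # switch back to a .dev0 development version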
| 13
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
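# A quick worked example for trim_batch (computed by hand; pad_token_id assumed to be 0):
#   batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#   trim_batch(batch, pad_token_id=0)  # -> tensor([[5, 6], [7, 0]]): the all-pad columns are dropped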
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def A_ ( snake_case_ : Optional[int] ):
'''simple docstring'''
return model_prefix.startswith("""rag""" )
def A_ ( extra_params ,hparams ,config ):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams ,p ,None ):
            if not hasattr(config ,p ) and not hasattr(config ,equivalent_param[p] ):
                logger.info("""config doesn't have a `{}` attribute""".format(p ) )
                delattr(hparams ,p )
                continue
            set_p = p if hasattr(config ,p ) else equivalent_param[p]
            setattr(config ,set_p ,getattr(hparams ,p ) )
            delattr(hparams ,p )
    return hparams, config
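# Usage sketch for the helper above (the SimpleNamespace stand-ins are
# illustrative, not a real hparams/config pair): a `dropout` hyperparameter
# lands on a T5-style config as `dropout_rate`, and the consumed hparam is
# removed from the namespace.
from types import SimpleNamespace

_hparams = SimpleNamespace(dropout=0.1 )
_config = SimpleNamespace(dropout_rate=0.0 )
_hparams, _config = A_(["""dropout"""] ,_hparams ,_config )
assert _config.dropout_rate == 0.1 and not hasattr(_hparams ,"""dropout""" )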
| 499
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester( ConfigTester ):
    def snake_case__ ( self ) -> Any:
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(config , """num_attention_heads""" ) )
        self.parent.assertTrue(hasattr(config , """num_encoder_blocks""" ) )
class SegformerModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[16, 32, 64, 128] , downsampling_rates=[1, 4, 8, 16] , num_attention_heads=[1, 2, 4, 8] , is_training=True , use_labels=True , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , scope=None , ) -> List[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ) -> str:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> Any:
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ) -> Optional[Any]:
        """simple docstring"""
        model = SegformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
    def create_and_check_for_image_segmentation( self , config , pixel_values , labels ) -> Union[str, Any]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )
    def create_and_check_for_binary_image_segmentation( self , config , pixel_values , labels ) -> List[Any]:
        """simple docstring"""
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        labels = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(torch_device )
        result = model(pixel_values , labels=labels )
        self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common( self ) -> List[Any]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
        self.model_tester = SegformerModelTester(self )
        self.config_tester = SegformerConfigTester(self , config_class=SegformerConfig )
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case__ ( self ) -> Any:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs )
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
pass
def snake_case__ ( self ) -> int:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def snake_case__ ( self ) -> str:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths )
            self.assertEqual(len(attentions ) , expected_num_attentions )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , expected_num_attentions )
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + 1 , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , expected_num_attentions )
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def snake_case__ ( self ) -> Any:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
if not self.model_tester.is_training:
return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
@slow
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained(
            """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1E-1 ) )
@slow
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(500, 300)] )
        expected_shape = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , expected_shape )
| 163
|
'''simple docstring'''
def solution( n: int = 100 ):
    """simple docstring"""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 ,n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 163
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class _snake_case ( BaseOutput ):
    '''simple docstring'''
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 608
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCAmelCase_ : Dict = False
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case_ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def num_embed(self ):
return 1_2
@property
    def num_embeds_ada_norm(self ):
return 1_2
@property
    def text_embedder_hidden_size(self ):
return 3_2
@property
    def dummy_vqvae(self ):
        torch.manual_seed(0 )
        model = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer(self ):
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
    def dummy_text_encoder(self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
@property
    def dummy_transformer(self ):
torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            """attention_bias""": True,
            """cross_attention_dim""": 32,
            """attention_head_dim""": height * width,
            """num_attention_heads""": 1,
            """num_vector_embeds""": self.num_embed,
            """num_embeds_ada_norm""": self.num_embeds_ada_norm,
            """norm_num_groups""": 32,
            """sample_size""": width,
            """activation_fn""": """geglu-approximate""",
        }
        model = TransformeraDModel(**model_kwargs )
return model
def snake_case_ (self ):
_UpperCAmelCase : List[str] = """cpu"""
_UpperCAmelCase : Any = self.dummy_vqvae
_UpperCAmelCase : int = self.dummy_text_encoder
_UpperCAmelCase : Tuple = self.dummy_tokenizer
_UpperCAmelCase : List[str] = self.dummy_transformer
_UpperCAmelCase : Tuple = VQDiffusionScheduler(self.num_embed )
_UpperCAmelCase : int = LearnedClassifierFreeSamplingEmbeddings(learnable=lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = VQDiffusionPipeline(
vqvae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , transformer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , )
_UpperCAmelCase : List[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : int = """teddy bear playing in the pool"""
_UpperCAmelCase : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe([prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="""np""" )
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Optional[int] = pipe(
[prompt] , generator=lowerCAmelCase__ , output_type="""np""" , return_dict=lowerCAmelCase__ , num_inference_steps=2 )[0]
_UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
_UpperCAmelCase : str = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ (self ):
_UpperCAmelCase : Optional[Any] = """cpu"""
_UpperCAmelCase : Tuple = self.dummy_vqvae
_UpperCAmelCase : Dict = self.dummy_text_encoder
_UpperCAmelCase : int = self.dummy_tokenizer
_UpperCAmelCase : Any = self.dummy_transformer
_UpperCAmelCase : List[str] = VQDiffusionScheduler(self.num_embed )
_UpperCAmelCase : str = LearnedClassifierFreeSamplingEmbeddings(
learnable=lowerCAmelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
_UpperCAmelCase : Tuple = VQDiffusionPipeline(
vqvae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , transformer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , )
_UpperCAmelCase : List[str] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = """teddy bear playing in the pool"""
_UpperCAmelCase : str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Dict = pipe([prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="""np""" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe(
[prompt] , generator=lowerCAmelCase__ , output_type="""np""" , return_dict=lowerCAmelCase__ , num_inference_steps=2 )[0]
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
_UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 2_4, 2_4, 3)
_UpperCAmelCase : List[str] = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case_ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
        pipeline = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 414
| 0
|
def a_ ( number: int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
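# Quick illustration of the Kernighan loop above: 25 = 0b11001 has three set
# bits, and each `number &= number - 1` clears the lowest one, so the loop
# body executes exactly three times.
assert a_(25 ) == 3
assert a_(0 ) == 0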
| 84
|
import string
import numpy
def greatest_common_divisor( a: int , b: int ) -> int:
    """simple docstring"""
    return b if a == 0 else greatest_common_divisor(b % a , a )
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )
    def __init__( self , encrypt_key : numpy.ndarray ):
        """simple docstring"""
        self.encrypt_key = self.modulus(encrypt_key )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters( self , letter : str ):
        """simple docstring"""
        return self.key_string.index(letter )

    def replace_digits( self , num : int ):
        """simple docstring"""
        return self.key_string[round(num )]
    def check_determinant( self ):
        """simple docstring"""
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        req_l = len(self.key_string )
        if greatest_common_divisor(det , len(self.key_string ) ) != 1:
            msg = (
                F"determinant modular {req_l} of encryption key({det}) "
                F"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg )
    def process_text( self , text : str ):
        """simple docstring"""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars ) % self.break_key != 0:
            chars.append(last )
        return "".join(chars )
    def encrypt( self , text : str ):
        """simple docstring"""
        text = self.process_text(text.upper() )
        encrypted = ''''''
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[
                0
            ]
            encrypted_batch = ''''''.join(
                self.replace_digits(num ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key( self ):
        """simple docstring"""
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        det_inv = None
        for i in range(len(self.key_string ) ):
            if (det * i) % len(self.key_string ) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key )
            * numpy.linalg.inv(self.encrypt_key )
        )
        return self.to_int(self.modulus(inv_key ) )
    def decrypt( self , text : str ):
        """simple docstring"""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper() )
        decrypted = ''''''
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec ) ).T.tolist()[0]
            decrypted_batch = ''''''.join(
                self.replace_digits(num ) for num in batch_decrypted )
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    """simple docstring"""
    n = int(input('''Enter the order of the encryption key: ''' ) )
    hill_matrix = []
    print('''Enter each row of the encryption key with space separated integers''' )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = HillCipher(numpy.array(hill_matrix ) )
    print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
    option = input('''\n1. Encrypt\n2. Decrypt\n''' )
    if option == "1":
        text_e = input('''What text would you like to encrypt?: ''' )
        print('''Your encrypted text is:''' )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input('''What text would you like to decrypt?: ''' )
        print('''Your decrypted text is:''' )
        print(hc.decrypt(text_d ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
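# Worked example for the class above (key values are illustrative): the 2x2
# key [[2, 5], [1, 6]] has determinant 7, which is coprime with 36, so it
# passes check_determinant. Per the reference doctests for this cipher,
# encrypt('testing hill cipher') == 'WHXYJOLM9C6XT085LL' and decrypting that
# returns 'TESTINGHILLCIPHERR' (the trailing R is padding from process_text).
# demo = HillCipher(numpy.array([[2, 5], [1, 6]]))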
| 84
| 1
|
'''simple docstring'''
def lowercase__( input_str: str ):
    """simple docstring"""
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
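# e.g. lowercase__("hello world") == "world hello"; repeated whitespace is
# collapsed because str.split() drops empty fields.
assert lowercase__("""hello world""" ) == """world hello"""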
| 28
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase_ ( PretrainedConfig ):
    model_type = "data2vec-audio"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embedding_groups=16 , conv_pos_kernel_size=19 , num_conv_pos_embeddings=5 , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ) -> Dict:
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
def _snake_case ( self ) -> str:
return math.prod(self.conv_stride )
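# Minimal usage sketch (defaults per the signature above; upstream this class
# is registered as `Data2VecAudioConfig`): the property just defined is the
# overall downsampling factor of the convolutional feature encoder.
_cfg = lowerCAmelCase_()
assert math.prod(_cfg.conv_stride ) == 320  # 5 * 2**6 with the default strides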
| 18
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_wavaveca"""] = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_wavaveca"""] = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_wavaveca"""] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
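# Note on the pattern above (a description, not new behavior): `_LazyModule`
# defers the heavy torch/TF/Flax imports, so importing this package stays
# cheap and a symbol such as `Wav2Vec2Config` is only materialized on first
# attribute access, while the `TYPE_CHECKING` branch keeps static analyzers
# aware of the real names.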
| 17
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """allenai/led-base-16384""": 16384,
}
class lowerCamelCase_ ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> Optional[int]:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
    def SCREAMING_SNAKE_CASE__ ( self , value : Any ) -> Dict:
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def SCREAMING_SNAKE_CASE__ ( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )
    def SCREAMING_SNAKE_CASE__ ( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )
    def SCREAMING_SNAKE_CASE__ ( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_0 : List[Any] , token_ids_1 : List[Any]=None ) -> List[str]:
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def SCREAMING_SNAKE_CASE__ ( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def SCREAMING_SNAKE_CASE__ ( self , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 17
| 1
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __a ( _snake_case, _snake_case ):
@register_to_config
    def __init__( self , embedding_dim : int = 768 ,):
        '''simple docstring'''
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 ,embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 ,embedding_dim ) )
    def UpperCAmelCase__ ( self ,device : Optional[Union[str, torch.device]] = None ,dtype : Optional[torch.dtype] = None ,):
        '''simple docstring'''
        self.mean = nn.Parameter(self.mean.to(device ).to(dtype ) )
        self.std = nn.Parameter(self.std.to(device ).to(dtype ) )
return self
    def UpperCAmelCase__ ( self ,embeds ):
        '''simple docstring'''
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def UpperCAmelCase__ ( self ,embeds ):
        '''simple docstring'''
        embeds = (embeds * self.std) + self.mean
        return embeds
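# Round-trip sketch for the two methods above (the tensors are illustrative):
# (x - mean) / std followed by x * std + mean is the identity up to float
# error, which is what makes this scale/unscale pair safe to compose.
_x = torch.randn(2 , 4 )
_mean , _std = torch.zeros(1 , 4 ) , torch.ones(1 , 4 )
assert torch.allclose(((_x - _mean) * 1.0 / _std) * _std + _mean , _x , atol=1E-6 )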
| 109
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet( self ):
torch.manual_seed(0 )
        model = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def lowerCAmelCase__ ( self ):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type='numpy' , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self ):
        model_id = 'google/ncsnpp-celebahq-256'
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 27
| 0
|
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
    },
    '''emoji_file''': {
        '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def load_vocab_and_emoji( vocab_file, emoji_file ):
    with open(emoji_file, '''r''', encoding='''utf-8''' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, '''r''', encoding='''utf-8''' ) as f:
        token = f.readlines()
    token = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[''','''.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class snake_case ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                f'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ''' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                f'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ''' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _lowercase (self ):
"""simple docstring"""
return len(self.raw_vocab )
def _lowercase (self ):
"""simple docstring"""
return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _lowercase (self , text ):
        """simple docstring"""
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )

    def _lowercase (self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )

    def _lowercase (self , index ):
        """simple docstring"""
        return self.subword_tokenizer.convert_id_to_token(index )
    def _lowercase (self , tokens ):
        """simple docstring"""
        out_string = ''''''.join(tokens ).strip()
        return out_string
    def _lowercase (self , conversation ):
        """simple docstring"""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self , save_directory , filename_prefix = None ):
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] )
        else:
            vocab_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file''']
            )
            emoji_file = (
                (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file''']
            )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(''','''.join(token ) + '''\n''' )
                index += 1
        with open(emoji_file , '''w''' , encoding='''utf-8''' ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object ):
    def __init__(self , vocab , ids_to_tokens , emoji ):
        """simple docstring"""
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' )
        self.content_repatter2 = re.compile(R'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' )
        self.content_repatter3 = re.compile(R'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' )
        self.content_repatter4 = re.compile(
            R'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter5 = re.compile(
            R'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' )
        self.content_repatter6 = re.compile(
            R'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' )
        keisen = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'''
        blocks = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'''
        self.content_trans1 = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} )
def __len__(self ):
"""simple docstring"""
return len(self.ids_to_tokens )
    def clean_text(self , content ):
        """simple docstring"""
        content = self.content_repatter1.sub('''<URL>''' , content )
        content = self.content_repatter2.sub('''<EMAIL>''' , content )
        content = self.content_repatter3.sub('''<TEL>''' , content )
        content = self.content_repatter4.sub('''<DATE>''' , content )
        content = self.content_repatter5.sub('''<DATE>''' , content )
        content = self.content_repatter6.sub('''<PRICE>''' , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('''<BLOCK><BLOCK>''' , '''<BLOCK>''' )
        return content
    def tokenize(self , text , clean=False ):
        """simple docstring"""
        text = text.replace(''' ''' , '''<SP>''' )
        text = text.replace('''　''' , '''<SP>''' )
        text = text.replace('''\r\n''' , '''<BR>''' )
        text = text.replace('''\n''' , '''<BR>''' )
        text = text.replace('''\r''' , '''<BR>''' )
        text = text.replace('''\t''' , '''<TAB>''' )
        text = text.replace('''—''' , '''ー''' )
        text = text.replace('''−''' , '''ー''' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )

        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False

        def checku2e(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE28080 and c <= 0XE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('''<KIGOU>''' )
                elif checku2e(wd ):
                    result.append('''<U2000U2BFF>''' )
                else:
                    for i in wd.encode('''utf-8''' ):
                        result.append('''<|byte%d|>''' % i )
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        """simple docstring"""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
| 719
|
"""simple docstring"""
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--big_bird_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
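# Example invocation (script name and paths are hypothetical, shown only to
# illustrate the expected flags):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bigbird_roberta_base.ckpt \
#       --big_bird_config_file bigbird_config.json \
#       --pytorch_dump_path ./bigbird-pytorch \
#       --is_trivia_qa   # pass only when the checkpoint carries a TriviaQA head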
| 628
| 0
|
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 576
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 18
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
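# Hedged usage sketch (added for illustration): build a default configuration and
# inspect the dynamic ONNX input axes. The vocab_size override is an assumption;
# camembert-base ships with a 32005-entry vocabulary.
if __name__ == "__main__":
    config = CamembertConfig(vocab_size=32005)
    onnx_config = CamembertOnnxConfig(config)
    print(config.model_type, config.hidden_size)  # camembert 768
    print(dict(onnx_config.inputs))  # {'input_ids': {0: 'batch', 1: 'sequence'}, ...}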
| 83
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x: float) -> float:
        return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
while i <= 100_000:
print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
i *= 10
| 83
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
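# Note (added for illustration): with sys.modules[__name__] replaced by the _LazyModule
# above, an import such as
#   from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderModel
# resolves the attribute on first access, so the heavy torch/flax imports only happen
# when the model class is actually used.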
| 30
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    '''simple docstring'''
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    '''simple docstring'''
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    '''simple docstring'''
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    '''simple docstring'''
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 479
| 0
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('DataClass', Any)
DataClassType = NewType('DataClassType', Any)
def string_to_bool(v):
    """simple docstring"""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """simple docstring"""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """simple docstring"""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args, allow_extra_keys=False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file, allow_extra_keys=False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
    def parse_yaml_file(self, yaml_file, allow_extra_keys=False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
| 403
|
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
def __magic_name__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(_SCREAMING_SNAKE_CASE )
a_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __magic_name__ ( self ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(_SCREAMING_SNAKE_CASE )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __magic_name__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = SwiftFormerModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def __magic_name__ ( self ):
pass
def __magic_name__ ( self ):
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
a_ = outputs.hidden_states
a_ = 8
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __magic_name__ ( self ):
def _config_zero_init(_SCREAMING_SNAKE_CASE ):
a_ = copy.deepcopy(_SCREAMING_SNAKE_CASE )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 1E-10 )
if isinstance(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ):
a_ = _config_zero_init(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return configs_no_init
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
a_ = model_class(config=_SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __magic_name__ ( self ):
pass
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 403
| 1
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """simple docstring"""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """simple docstring"""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
| 404
|
'''simple docstring'''
def prefix_function(input_string: str) -> list:
    """simple docstring"""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """simple docstring"""
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
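    # Hedged usage sketch (added for illustration): position i of the KMP prefix
    # function stores the length of the longest proper prefix of s[: i + 1] that is
    # also its suffix.
    print(prefix_function("aabaaab"))  # [0, 1, 0, 1, 2, 2, 3]
    print(longest_prefix("aabaaab"))  # 3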
| 404
| 1
|
"""simple docstring"""
class EditDistance:
    """simple docstring"""

    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()
print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 95
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : Tuple , __UpperCamelCase : str )->Tuple:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
_UpperCAmelCase = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] )->Optional[Any]:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : Tuple )->Optional[Any]:
_UpperCAmelCase = '''sgugger/tiny-distilbert-classification'''
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , only_pretrain_model=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : Tuple )->str:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , torchscript=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def lowercase__ ( self : Any )->Optional[Any]:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , fpaa=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : Tuple )->Any:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase )
# set architectures equal to `None`
_UpperCAmelCase = None
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : List[Any] )->str:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def lowercase__ ( self : List[str] )->int:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCamelCase , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self : str )->Dict:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : Optional[int] )->Optional[Any]:
_UpperCAmelCase = '''sshleifer/tinier_bart'''
_UpperCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase__ ( self : List[str] )->str:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
_UpperCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self : Optional[int] )->int:
_UpperCAmelCase = '''sshleifer/tinier_bart'''
_UpperCAmelCase = AutoConfig.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
_UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase__ ( self : Optional[Any] )->int:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , save_to_csv=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCamelCase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(__UpperCamelCase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(__UpperCamelCase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(__UpperCamelCase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(__UpperCamelCase , '''env.csv''' ) , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''env.csv''' ) ).exists() )
def lowercase__ ( self : Tuple )->List[Any]:
_UpperCAmelCase = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(__UpperCamelCase : str ):
self.assertTrue(hasattr(__UpperCamelCase , '''sequential''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''cumulative''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''current''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCamelCase , '''log.txt''' ) , log_print=__UpperCamelCase , trace_memory_line_by_line=__UpperCamelCase , multi_process=__UpperCamelCase , )
_UpperCAmelCase = PyTorchBenchmark(__UpperCamelCase )
_UpperCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__UpperCamelCase , '''log.txt''' ) ).exists() )
| 95
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 695
|
"""simple docstring"""
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 695
| 1
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
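# Example invocation via python-fire (script name and paths are hypothetical):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#   python convert_to_fp16.py pytorch_model.bin   # no save_path: overwrites the input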
| 720
|
"""simple docstring"""
g = 9.80665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    if fluid_density <= 0:
        raise ValueError('Impossible fluid density')
    if volume < 0:
        raise ValueError('Impossible Object volume')
    if gravity <= 0:
        raise ValueError('Impossible Gravity')
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
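    # Worked example (added for illustration): a 0.002 m^3 object fully submerged in
    # water (997 kg/m^3) feels a buoyant force of 997 * 9.80665 * 0.002 ≈ 19.55 N.
    print(archimedes_principle(fluid_density=997, volume=0.002))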
| 222
| 0
|
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
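    # Hedged usage sketch (added for illustration): tanh squashes inputs into (-1, 1);
    # the formula above is the identity tanh(x) = 2 * sigmoid(2x) - 1.
    print(tangent_hyperbolic(np.array([-1.0, 0.0, 1.0])))  # ~[-0.7616  0.  0.7616]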
| 532
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import (
        AutoProcessor,
        BertTokenizerFast,
        BlipImageProcessor,
        GPT2Tokenizer,
        InstructBlipProcessor,
        PreTrainedTokenizerFast,
    )
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert')
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
UpperCamelCase = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor.qformer_tokenizer , UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(UpperCamelCase__ , return_tensors='np' )
UpperCamelCase = processor(images=UpperCamelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = processor(text=UpperCamelCase__ )
UpperCamelCase = tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
UpperCamelCase = qformer_tokenizer(UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase = processor.batch_decode(UpperCamelCase__ )
UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_qformer_tokenizer()
UpperCamelCase = InstructBlipProcessor(
tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ , qformer_tokenizer=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 430
| 0
|
"""simple docstring"""
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
    config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
    tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
    batch = tokenizer(["Making tiny model"], return_tensors="pt")
    outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 713
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''levit'''
def __init__( self : List[str] , lowerCAmelCase_ : int=224 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : int=2 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=16 , lowerCAmelCase_ : Tuple=[128, 256, 384] , lowerCAmelCase_ : Optional[int]=[4, 8, 12] , lowerCAmelCase_ : str=[4, 4, 4] , lowerCAmelCase_ : Dict=[16, 16, 16] , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : Optional[int]=[2, 2, 2] , lowerCAmelCase_ : Any=[2, 2, 2] , lowerCAmelCase_ : int=0.0_2 , **lowerCAmelCase_ : List[Any] , ) -> List[str]:
super().__init__(**lowerCAmelCase_ )
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : str = kernel_size
UpperCAmelCase_ : List[Any] = stride
UpperCAmelCase_ : List[str] = padding
UpperCAmelCase_ : Any = hidden_sizes
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[str] = depths
UpperCAmelCase_ : int = key_dim
UpperCAmelCase_ : List[str] = drop_path_rate
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : Tuple = attention_ratio
UpperCAmelCase_ : Optional[int] = mlp_ratio
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : List[Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> float:
return 1e-4
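
# --- Illustrative usage (editor's sketch, not part of the original module) ---
# From user code the classes above are reachable through the transformers package
# (the configuration_levit module path is an assumption about this file's name):
#
#     from transformers import LevitConfig
#     from transformers.models.levit.configuration_levit import LevitOnnxConfig
#
#     levit_config = LevitConfig()            # defaults defined above
#     onnx_config = LevitOnnxConfig(levit_config)
#     print(dict(onnx_config.inputs))         # {'pixel_values': {0: 'batch', ...}}
#     print(onnx_config.atol_for_validation)  # 1e-4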
| 463
| 0
|
import warnings
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class FlavaProcessor(ProcessorMixin):
    """Constructs a FLAVA processor which wraps an image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
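
# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Typical call pattern, assuming network access to the public "facebook/flava-full"
# checkpoint; the processor tokenizes text and preprocesses images in one call:
#
#     from PIL import Image
#     from transformers import FlavaProcessor
#
#     processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#     inputs = processor(
#         text=["a photo of a cat"],
#         images=Image.new("RGB", (224, 224)),
#         return_tensors="pt",
#     )
#     print(list(inputs.keys()))  # e.g. input_ids, token_type_ids, attention_mask, pixel_values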
| 562
|
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used cache backed by a deque (recency order) and a set (membership)."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                # Cache is full: evict the least recently used key.
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            # Key already cached: move it to the front (most recently used).
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 562
| 1
|
"""ViViT model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
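
# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Fields not passed explicitly keep the defaults defined above:
#
#     from transformers import VivitConfig
#
#     small = VivitConfig(num_frames=16, hidden_size=384, num_hidden_layers=6)
#     print(small.num_frames, small.tubelet_size)  # 16 [2, 16, 16]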
| 617
|
"""Whisper model import structure (lazily loaded)."""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
    "feature_extraction_whisper": ["WhisperFeatureExtractor"],
    "processing_whisper": ["WhisperProcessor"],
    "tokenization_whisper": ["WhisperTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_whisper"] = [
        "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WhisperForConditionalGeneration",
        "WhisperModel",
        "WhisperPreTrainedModel",
        "WhisperForAudioClassification",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_whisper"] = [
        "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWhisperForConditionalGeneration",
        "TFWhisperModel",
        "TFWhisperPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_whisper"] = [
        "FlaxWhisperForConditionalGeneration",
        "FlaxWhisperModel",
        "FlaxWhisperPreTrainedModel",
        "FlaxWhisperForAudioClassification",
    ]


if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
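
# --- Editor's note (not part of the original module) ---
# Because sys.modules[__name__] is replaced by _LazyModule at import time, a
# statement such as
#
#     from transformers.models.whisper import WhisperForConditionalGeneration
#
# only imports modeling_whisper (and therefore torch) at the moment the name is
# first resolved; the TF and Flax backends stay unimported unless requested.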
| 617
| 1