| code (string, 82–54.1k chars) | code_codestyle (int64, 0–699) | style_context (string, 111–35.6k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""Convert a SwitchTransformers checkpoint from the original T5X repository into a PyTorch model."""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. In HF T5, `block.{x}.layer.{y}` corresponds to `layers_{x}` in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    # into one entry per expert
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
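# A minimal sketch of what `rename_keys` does to one T5X-style parameter path
# (the path below is hypothetical, for illustration only):
#   "encoder/layers_0/attention/key/kernel"
#     step 1 -> "encoder/block/0/layer/attention/key/kernel"
#     step 2 -> "encoder/block/0/layer/0/SelfAttention/k/kernel"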
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a Google-style gin config to the Hugging Face config format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
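# A small sketch of the gin parsing above, with hypothetical gin file contents:
#   NUM_HEADS = 12    -> args["num_heads"] = 12   (no "." in the value, so int)
#   MLP_DIM = 3072    -> args["d_ff"] = 3072
#   dense.MlpBlock.activations = ('gelu',)
#                     -> args["feed_forward_proj"] = "gelu"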
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise the PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the pre-trained SwitchTransformers T5X checkpoint to convert. If `--config_name` is not"
            " provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed.",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of the SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts.")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
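# A hypothetical invocation of this script (script name and all paths below
# are placeholders, not real checkpoints):
#   python convert_switch_transformers_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/model/config.gin \
#       --pytorch_dump_folder_path /path/to/output \
#       --num_experts 8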
| 418 |
def hamming_distance(string1: str, string2: str) -> int:
    """
    Calculate the Hamming distance between two equal-length strings.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 70 | 0 |
"""
The Koch snowflake is a fractal curve built by starting from an equilateral
triangle and recursively replacing the middle third of every line segment
with an outward-pointing triangular bump.
"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply `iteration_step` the requested number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every segment with the four segments forming the Koch bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid a stretched display of the curve
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
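# Sanity check on the construction: each iteration replaces every segment with
# four shorter ones, so the 3 initial segments become 3 * 4**5 = 3072 segments
# (3073 plotted points) after the 5 iterations used above.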
| 531 |
def solution(n: int = 10) -> str:
    """
    Return the last ``n`` digits of the huge non-Mersenne prime
    28433 * 2**7830457 + 1 (Project Euler problem 97).
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
| 70 | 0 |
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result() -> None:
    """Check Prim's algorithm against a hand-verified minimum spanning tree."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    # the MST is undirected, so accept each expected edge in either orientation
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 405 |
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """
    Return number + 2 if (number, number + 2) is a twin-prime pair, else -1.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 70 | 0 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 284 |
# Algorithm for pigeonhole sorting
def pigeonhole_sort(a):
    """
    Sort the list in place by counting occurrences into "pigeonholes".

    >>> a = [8, 3, 2, 7, 4, 6, 8]
    >>> pigeonhole_sort(a)
    >>> a
    [2, 3, 4, 6, 7, 8, 8]
    """
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))


if __name__ == "__main__":
    main()
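# Worked example for the input above: min_val = 2, max_val = 8, size = 7, so
# holes = [1, 1, 1, 0, 1, 1, 2] (counts for the values 2..8), and the
# write-back pass prints: 2 3 4 6 7 8 8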
| 70 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
_a : int = OPTConfig
_a : Dict = {}
_a : List[str] = '''gelu'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__=99 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=20 , UpperCamelCase__=2 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=16 , UpperCamelCase__=16 , ):
"""simple docstring"""
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = eos_token_id
a_ = pad_token_id
a_ = bos_token_id
a_ = embed_dim
a_ = word_embed_proj_dim
a_ = False
def _a ( self ):
"""simple docstring"""
a_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a_ = tf.concat([input_ids, eos_tensor] , axis=1 )
a_ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=A_ , **self.config_updates , )
a_ = prepare_opt_inputs_dict(A_ , A_ )
return config, inputs_dict
def _a ( self , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
a_ = TFOPTModel(config=A_ )
a_ = inputs_dict['input_ids']
a_ = input_ids[:1, :]
a_ = inputs_dict['attention_mask'][:1, :]
a_ = 1
# first forward pass
a_ = model(A_ , attention_mask=A_ , use_cache=A_ )
a_ , a_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
a_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
a_ = tf.concat([input_ids, next_tokens] , axis=-1 )
a_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a_ = model(A_ , attention_mask=A_ )[0]
a_ = model(A_ , attention_mask=A_ , past_key_values=A_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a_ = output_from_no_past[:, -3:, random_slice_idx]
a_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A_ , A_ , rtol=1e-3 )
@require_tf
class __SCREAMING_SNAKE_CASE (__A , __A , unittest.TestCase ):
"""simple docstring"""
_a : Tuple = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
_a : List[Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
_a : str = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
_a : Tuple = False
_a : int = False
_a : Tuple = False
_a : List[Any] = 10
def _a ( self ):
"""simple docstring"""
a_ = TFOPTModelTester(self )
a_ = ConfigTester(self , config_class=A_ )
def _a ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self ):
"""simple docstring"""
a_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A_ )
def _a ( self ):
"""simple docstring"""
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(UpperCamelCase__ , UpperCamelCase__ ):
if hasattr(A_ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(A_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
a_ = model_class(config=A_ )
a_ = _get_word_embedding_weight(A_ , model.get_input_embeddings() )
a_ = _get_word_embedding_weight(A_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(A_ )
a_ = _get_word_embedding_weight(A_ , model.get_input_embeddings() )
a_ = _get_word_embedding_weight(A_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
a_ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , A_ )
# check that weights remain the same after resizing
a_ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
a_ = False
self.assertTrue(A_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , A_ )
a_ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
a_ = False
self.assertTrue(A_ )
def _long_tensor(tok_lst):
    # token ids as an int32 tensor
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
_a : str = 99
def _a ( self ):
"""simple docstring"""
a_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
a_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
a_ = input_ids.shape[0]
a_ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ):
"""simple docstring"""
a_ = TFOPTModel.from_pretrained('facebook/opt-350m' )
a_ = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
a_ = tf.not_equal(A_ , model.config.pad_token_id )
with tf.GradientTape():
a_ = model(input_ids=A_ , attention_mask=A_ ).last_hidden_state
a_ = (1, 11, 512)
self.assertEqual(output.shape , A_ )
a_ = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , A_ , atol=4e-3 ) )
a_ = tf.function(A_ , jit_compile=A_ )
a_ = xla_generate(A_ , A_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , A_ , atol=4e-2 ) )
@require_tf
@slow
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
"""simple docstring"""
super().setUp()
a_ = 'facebook/opt-350m'
def _a ( self ):
"""simple docstring"""
a_ = TFOPTForCausalLM.from_pretrained(self.path_model )
a_ = GPTaTokenizer.from_pretrained(self.path_model )
a_ = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
a_ = tokenizer(A_ , return_tensors='tf' , padding=A_ , add_special_tokens=A_ )
a_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
a_ = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(A_ , A_ , atol=1e-4 ) )
a_ = tf.function(A_ , jit_compile=A_ )
a_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(A_ , A_ , atol=1e-4 ) )
@require_tf
@slow
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self ):
"""simple docstring"""
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _a ( self ):
"""simple docstring"""
a_ = 'facebook/opt-125m'
a_ = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
a_ = []
a_ = GPTaTokenizer.from_pretrained(A_ )
a_ = TFOPTForCausalLM.from_pretrained(A_ )
for prompt in self.prompts:
a_ = tokenizer(A_ , return_tensors='tf' ).input_ids
a_ = model.generate(A_ , max_length=10 )
a_ = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
predicted_outputs += generated_string
self.assertListEqual(A_ , A_ )
def _a ( self ):
"""simple docstring"""
a_ = 'facebook/opt-350m'
a_ = GPTaTokenizer.from_pretrained(A_ )
a_ = TFOPTForCausalLM.from_pretrained(A_ )
a_ = 'left'
# use different length sentences to test batching
a_ = [
'Hello, my dog is a little',
'Today, I',
]
a_ = tokenizer(A_ , return_tensors='tf' , padding=A_ )
a_ = inputs['input_ids']
a_ = model.generate(input_ids=A_ , attention_mask=inputs['attention_mask'] )
a_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
a_ = model.generate(input_ids=A_ )
a_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
a_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
a_ = model.generate(input_ids=A_ , max_length=model.config.max_length - num_paddings )
a_ = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
a_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_ )
a_ = tokenizer.decode(output_padded[0] , skip_special_tokens=A_ )
a_ = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , [non_padded_sentence, padded_sentence] )
def _a ( self ):
"""simple docstring"""
a_ = 'facebook/opt-350m'
a_ = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
a_ = []
a_ = GPTaTokenizer.from_pretrained(A_ )
a_ = TFOPTForCausalLM.from_pretrained(A_ )
for prompt in self.prompts:
a_ = tokenizer(A_ , return_tensors='tf' ).input_ids
a_ = model.generate(A_ , max_length=10 )
a_ = tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
predicted_outputs += generated_string
self.assertListEqual(A_ , A_ )
| 536 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = BertTokenizer
UpperCamelCase = BertTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = filter_non_english
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
super().setUp()
lowerCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def a__ ( self : Tuple , A_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = 'unwanted, running'
return input_text, output_text
def a__ ( self : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# With lower casing
lowerCamelCase_ = self.get_tokenizer(do_lower_case=A_ )
lowerCamelCase_ = self.get_rust_tokenizer(do_lower_case=A_ )
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : int ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer()
lowerCamelCase_ = 'a\n\'ll !!to?\'d of, can\'t.'
lowerCamelCase_ = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(A_ ) , A_ )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCamelCase_ = {}
for i, token in enumerate(A_ ):
lowerCamelCase_ = i
lowerCamelCase_ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.tokenizer_class.from_pretrained('bert-base-uncased' )
lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def a__ ( self : str ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCamelCase_ = tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = ['的', '人', '有']
lowerCamelCase_ = ''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
| 70 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : Any = ProphetNetTokenizer
A__ : Optional[int] = False
def _a ( self : List[Any] ):
"""simple docstring"""
super().setUp()
A__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _a ( self : Union[str, Any] , _snake_case : str ):
"""simple docstring"""
A__ = 'UNwant\u00E9d,running'
A__ = 'unwanted, running'
return input_text, output_text
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = self.tokenizer_class(self.vocab_file )
A__ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self : Optional[Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self : Tuple ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self : List[str] ):
"""simple docstring"""
A__ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
A__ = {}
for i, token in enumerate(A_ ):
A__ = i
A__ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
A__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
A__ = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
A__ = tokenizer(A_ , padding=A_ , return_tensors='pt' )
self.assertIsInstance(A_ , A_ )
A__ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(A_ , A_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def _a ( self : int ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self : List[str] ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self : int ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def _a ( self : int ):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
A__ = tokenizer.encode('sequence builders' , add_special_tokens=A_ )
A__ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ )
A__ = tokenizer.build_inputs_with_special_tokens(A_ )
A__ = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == text + [1_02]
assert encoded_pair == text + [1_02] + text_a + [1_02]
| 9 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : List[str] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
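# For example, with height = width = 512 and scale_factor = 8:
# 512 // 8**2 = 8 with no remainder, so this returns (8 * 8, 8 * 8) = (64, 64),
# i.e. the latent grid is the image size divided by the scale factor, rounded
# up to a whole multiple when the image size is not divisible by it.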
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : str , A_ : UNetaDConditionModel , A_ : DDPMScheduler , A_ : VQModel , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=A_ , scheduler=A_ , movq=A_ , )
lowerCamelCase_ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__ ( self : List[Any] , A_ : Tuple , A_ : Dict , A_ : List[Any] , A_ : int , A_ : Any , A_ : Tuple ) -> Any:
"""simple docstring"""
if latents is None:
lowerCamelCase_ = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCamelCase_ = latents.to(A_ )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def a__ ( self : int , A_ : str=0 ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase_ = torch.device(f"""cuda:{gpu_id}""" )
lowerCamelCase_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def a__ ( self : Tuple , A_ : Union[str, Any]=0 ) -> Dict:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowerCamelCase_ = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase_ , lowerCamelCase_ = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
lowerCamelCase_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self : List[Any] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : int = 512 , A_ : int = 512 , A_ : int = 100 , A_ : float = 4.0 , A_ : int = 1 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = guidance_scale > 1.0
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.cat(A_ , dim=0 )
lowerCamelCase_ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.cat(A_ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_ = negative_image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.unet.config.in_channels
lowerCamelCase_ , lowerCamelCase_ = downscale_height_and_width(A_ , A_ , self.movq_scale_factor )
# create initial latent
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = {'image_embeds': image_embeds}
lowerCamelCase_ = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase_ , lowerCamelCase_ = noise_pred.chunk(2 )
lowerCamelCase_ , lowerCamelCase_ = variance_pred.chunk(2 )
lowerCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase_ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase_ , lowerCamelCase_ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , )[0]
# post-processing
lowerCamelCase_ = self.movq.decode(A_ , force_not_quantize=A_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCamelCase_ = image * 0.5 + 0.5
lowerCamelCase_ = image.clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
| 70 | 0 |
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
| 697 |
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """
    Global mean thresholding: every pixel brighter than the image mean becomes
    white (255), everything else black (0). `image` is a grayscale PIL image.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 70 | 0 |
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 219 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 70 | 0 |
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
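# The tests above share one sampling pattern. A standalone sketch of that loop
# with a stand-in for the model (hypothetical; real use would call a trained UNet):
def _euler_sampling_demo():
    sched = EulerDiscreteScheduler(num_train_timesteps=1000)
    sched.set_timesteps(10)
    generator = torch.manual_seed(0)
    sample = torch.randn(1, 3, 8, 8, generator=generator) * sched.init_noise_sigma
    for t in sched.timesteps:
        model_input = sched.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet forward pass
        sample = sched.step(noise_pred, t, sample, generator=generator).prev_sample
    return sample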
| 523 |
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
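# An offline sketch of the parsing step above (hypothetical payload; the real
# script fetches this JSON from the GitHub Actions API with curl). Only the
# "name" and "status" fields are consumed.
_demo_status = json.loads(
    '{"runners": [{"name": "ci-runner-1", "status": "online"},'
    ' {"name": "ci-runner-2", "status": "offline"}]}'
)
assert [r["name"] for r in _demo_status["runners"] if r["status"] == "offline"] == ["ci-runner-2"]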
| 70 | 0 |
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Count the tile totals t <= t_limit that can form between 1 and n_limit
    different hollow square laminae (Project Euler problem 174)."""
    count = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must share the parity of the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
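# A hedged sanity check (assumes the two enumerations are equivalent): brute-force
# every lamina directly for a small tile budget and compare with the counter above.
def _brute_force(t_limit: int = 1000, n_limit: int = 10) -> int:
    from collections import Counter

    counts = Counter()
    for outer in range(3, t_limit // 4 + 2):
        for hole in range(outer - 2, 0, -2):  # same parity as the outer square
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break
            counts[tiles] += 1
    return sum(1 for n in counts.values() if 1 <= n <= n_limit)


assert _brute_force(1000) == solution(1000)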
if __name__ == "__main__":
print(F'''{solution() = }''') | 256 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name
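# A quick illustration of the key mapping above (hypothetical timm key names,
# chosen only to exercise each branch):
assert rename_key("stem.conv.weight") == "bit.embedder.convolution.weight"
assert rename_key("stages.0.blocks.0.conv1.weight") == "bit.encoder.stages.0.layers.0.conv1.weight"
assert rename_key("head.fc.weight") == "classifier.1.weight"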
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
lowerCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 70 | 0 |
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
_lowerCamelCase = "."
if __name__ == "__main__":
_lowerCamelCase = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''')
_lowerCamelCase = []
_lowerCamelCase = []
with open(doctest_file_path) as fp:
for line in fp:
_lowerCamelCase = line.strip()
_lowerCamelCase = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
_lowerCamelCase = "\n".join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
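# A hedged helper sketch (not part of the original script): rewriting the list
# in sorted order is one way to satisfy the alphabetical-order check above.
def _sort_doctest_list(path: str) -> None:
    with open(path) as f:
        lines = sorted(line for line in f if line.strip())
    with open(path, "w") as f:
        f.writelines(lines)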
| 674 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
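# The sequence-length arithmetic the tester above relies on: the number of
# patches plus one [CLS] token. With the defaults used here (image_size=30,
# patch_size=2):
assert (30 // 2) ** 2 + 1 == 226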
| 70 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
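# A generic sketch of the lazy-import idea above (plain Python, hypothetical;
# the real _LazyModule is more involved): the heavy import is deferred until
# the attribute is first requested.
def _lazy_attr(module_name: str, attr: str):
    def load():
        import importlib

        return getattr(importlib.import_module(module_name), attr)

    return load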
| 418 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 list of lists with the requested shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
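# The per-utterance normalization those checks assert, as a standalone sketch
# (a minimal re-implementation for illustration, not the library internals):
def _cmvn(feats):
    return (feats - feats.mean(axis=0)) / np.sqrt(feats.var(axis=0) + 1e-7)


_demo_feats = _cmvn(np.random.rand(100, 24))
assert np.all(np.abs(_demo_feats.mean(axis=0)) < 1e-6)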
| 70 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
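# Standalone sketch of the decoder-mask construction in
# prepare_blenderbot_inputs_dict above: position 0 is always attended; later
# positions are masked wherever they hold the pad id (1 for Blenderbot).
def _demo_decoder_mask():
    pad_id = 1
    decoder_input_ids = tf.constant([[2, 5, 1, 1]])
    return tf.concat(
        [
            tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
            tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], pad_id), tf.int8),
        ],
        axis=-1,
    )  # -> [[1, 1, 0, 0]]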
| 531 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
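# For context: the "@,@" / "@.@" / "@-@" markers above are the WikiText-style
# escapes that Moses detokenization reverses. A trivial standalone illustration
# (a hypothetical re-implementation of just the comma case):
_demo_tokens = ["5", "@,@", "000"]
assert " ".join(_demo_tokens).replace(" @,@ ", ",") == "5,000"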
| 70 | 0 |
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 405 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # computes the expected (height, width) after the "shortest_edge" resize rule
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
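# The shortest-edge rule get_expected_values implements, as a standalone check
# against the 480x640 COCO image the integration tests above use (resized with
# shortest_edge=800 by the pretrained checkpoints):
def _shortest_edge_resize(h, w, shortest_edge):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert _shortest_edge_resize(480, 640, 800) == (800, 1066)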
| 70 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 284 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
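# Quick check of the derived attribute above: with the default embed_dim=96 and
# four stages, the channel dimension after the last stage is 96 * 2**3 = 768.
assert Swinv2Config().hidden_size == 768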
| 70 | 0 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
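
# Example invocation (the script filename is an assumption; flags match the argparse setup above):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized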
| 536 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
| 70 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> cer = datasets.load_metric(\"cer\")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
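
# Quick usage sketch (mirrors the docstring example above):
#   import datasets
#   cer = datasets.load_metric("cer")
#   cer.compute(predictions=["hello wrld"], references=["hello world"])  # one deletion over 11 chars ~= 0.09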
| 9 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a tensor [xmin, ymin, xmax, ymax] into a dict of pixel coordinates."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
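
# Minimal usage sketch (the checkpoint name is an assumption; any object-detection model works):
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]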
| 70 | 0 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 697 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 70 | 0 |
def check_cycle(graph: dict) -> bool:
    """Returns True if the directed graph (given as an adjacency dict) contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
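
# Quick demo (hypothetical adjacency dict; the edge 2 -> 0 closes a cycle):
#   check_cycle({0: [1], 1: [2], 2: [0], 3: []})  # -> True
#   check_cycle({0: [1], 1: [2], 2: [], 3: []})   # -> False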
if __name__ == "__main__":
from doctest import testmod
testmod() | 219 |
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """A string can be rearranged into a palindrome iff at most one character has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark code comparing the two implementations."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
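
# Quick checks (illustrative):
#   can_string_be_rearranged_as_palindrome_counter("Momo")  # True  ("moom")
#   can_string_be_rearranged_as_palindrome("Mother")        # False (six characters with odd counts)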
| 70 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
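
# Example invocation (the script filename is an assumption; flags match the argparse setup below):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub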
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 523 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of ``func`` by Newton's method, starting from the guess ``a``.

    ``func`` is a string in the variable ``x`` and is evaluated with eval(),
    so only use trusted input.
    """
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
    # Find value of e
print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""")
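    # One worked iteration for f(x) = x**2 - 5*x + 2 starting at 0.4:
    #   x1 = 0.4 - f(0.4) / f'(0.4) = 0.4 - 0.16 / (-4.2) ~= 0.4381, converging to (5 - sqrt(17)) / 2 ~= 0.43845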
| 70 | 0 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline | 256 |
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self) -> Any:
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """A loop exists when the exact same node appears more than once in a traversal."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False (equal data, but distinct nodes)

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 70 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_ ( lowercase_ : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = str(lowercase_ )
return len(lowercase_ ) == 9 and set(lowercase_ ) == set('''123456789''' )
def lowerCAmelCase_ ( ):
'''simple docstring'''
for base_num in range(9999 , 4999 , -1 ):
__SCREAMING_SNAKE_CASE : List[str] = 10_0002 * base_num
if is_9_pandigital(lowercase_ ):
return candidate
for base_num in range(333 , 99 , -1 ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 100_2003 * base_num
if is_9_pandigital(lowercase_ ):
return candidate
return None
if __name__ == "__main__":
print(f'{solution() = }')
| 674 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
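
# Note: the two training runs match because DDPMScheduler.add_noise and DDIMScheduler.add_noise
# implement the same forward process, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.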
| 70 | 0 |
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None` and calling `gc.collect()` and the
    device-appropriate `empty_cache()`. Returned objects should be reassigned to the same variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    Decorator that retries `function` with a halved batch size whenever a memory-related
    exception is raised. `function` must take `batch_size` as its first argument.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
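
# Usage sketch:
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # raises a CUDA OOM RuntimeError when batch_size is too large
#   train()  # called *without* batch_size; the decorator injects it, halving on every OOM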
| 418 |
def hamming_distance(string1: str, string2: str) -> int:
    """Calculates the Hamming distance between two equal-length strings."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count
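
# e.g. hamming_distance("karolin", "kathrin") == 3 (positions 2, 3 and 4 differ)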
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70 | 0 |
'''simple docstring'''
def add(first: int, second: int) -> int:
    """Adds two integers using only bitwise operations (carry propagation)."""
    while second != 0:
        c = first & second  # carry bits
        first ^= second     # partial sum without carry
        second = c << 1     # shift carry into position
    return first
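
# Worked trace: add(5, 3)
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#                             -> carry=0b010, first=0b100, second=0b100
#                             -> carry=0b100, first=0b000, second=0b1000
#                             -> carry=0b000, first=0b1000 = 8, second=0 -> done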
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 531 |
def solution(n: int = 10) -> str:
    """
    Returns the last ``n`` digits of 28433 * 2**7830457 + 1 (Project Euler problem 97),
    computed with modular exponentiation.
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
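
# pow(2, 7830457, modulus) uses modular exponentiation (O(log exponent) multiplications),
# so the multi-million-digit value of 2**7830457 is never materialized.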
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 70 | 0 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
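
# e.g. with pad_token_id=0:
#   trim_batch(torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]]), 0)  # -> tensor([[5, 6], [7, 0]])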
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 405 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Returns number + 2 if number and number + 2 form a twin-prime pair, -1 otherwise.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
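
# e.g. twin_prime(5) == 7 (5 and 7 are both prime), twin_prime(7) == -1 (9 is composite)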
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
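
# Illustrative rope_scaling usage: a linear factor of 2.0 stretches rotary position ids
# to cover twice the pretraining context length.
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})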
| 284 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """In-place pigeonhole sort; only works for lists of integers."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(x) for x in a]))


if __name__ == "__main__":
    main()
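
# Pigeonhole sort runs in O(n + range) time with O(range) extra space, where
# range = max(a) - min(a) + 1, so it only pays off when the value range is small.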
| 70 | 0 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_tf(self):
        pass
| 536 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
# With lower casing
lowerCamelCase_ = self.get_tokenizer(do_lower_case=A_ )
lowerCamelCase_ = self.get_rust_tokenizer(do_lower_case=A_ )
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(A_ )
lowerCamelCase_ = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(A_ )
lowerCamelCase_ = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def a__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def a__ ( self : int ) -> Any:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ = BasicTokenizer()
lowerCamelCase_ = 'a\n\'ll !!to?\'d of, can\'t.'
lowerCamelCase_ = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(A_ ) , A_ )
def a__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCamelCase_ = {}
for i, token in enumerate(A_ ):
lowerCamelCase_ = i
lowerCamelCase_ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def a__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def a__ ( self : Any ) -> int:
"""simple docstring"""
tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased' )
text = tokenizer.encode('sequence builders' , add_special_tokens=False )
text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def a__ ( self : str ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCamelCase_ = tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = ['的', '人', '有']
lowerCamelCase_ = ''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ )
lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
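# A minimal sketch (illustration only, reusing the WordpieceTokenizer imported
# above) of the greedy longest-match-first rule the tests rely on: a word is
# split into the longest vocabulary prefixes, and a word that cannot be fully
# tokenized collapses to the unknown token.
_demo_vocab = {tok: i for i, tok in enumerate(['[UNK]', 'un', '##want', '##ed'])}
_demo_wp = WordpieceTokenizer(vocab=_demo_vocab, unk_token='[UNK]')
assert _demo_wp.tokenize('unwanted') == ['un', '##want', '##ed']
assert _demo_wp.tokenize('unwantedX') == ['[UNK]']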
| 70 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
A__ : List[str] = ["image_processor"]
A__ : Dict = "SamImageProcessor"
def __init__( self , image_processor ):
    """simple docstring"""
    super().__init__(image_processor )
    self.current_processor = self.image_processor
    self.point_pad_value = -10
    self.target_size = self.image_processor.size['longest_edge']
def __call__( self : Any , _snake_case : List[str]=None , _snake_case : List[Any]=None , _snake_case : int=None , _snake_case : List[Any]=None , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
A__ = self.image_processor(
A_ , return_tensors=A_ , **A_ , )
# pop arguments that are not used in the foward but used nevertheless
A__ = encoding_image_processor['original_sizes']
if hasattr(A_ , 'numpy' ): # Checks if Torch or TF tensor
A__ = original_sizes.numpy()
A__ , A__ , A__ = self._check_and_preprocess_points(
input_points=A_ , input_labels=A_ , input_boxes=A_ , )
A__ = self._normalize_and_convert(
A_ , A_ , input_points=A_ , input_labels=A_ , input_boxes=A_ , return_tensors=A_ , )
return encoding_image_processor
def _normalize_and_convert( self , encoding_image_processor , original_sizes , input_points=None , input_labels=None , input_boxes=None , return_tensors="pt" , ):
"""simple docstring"""
if input_points is not None:
if len(A_ ) != len(A_ ):
A__ = [
self._normalize_coordinates(self.target_size , A_ , original_sizes[0] ) for point in input_points
]
else:
A__ = [
self._normalize_coordinates(self.target_size , A_ , A_ )
for point, original_size in zip(A_ , A_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
A__ , A__ = self._pad_points_and_labels(A_ , A_ )
A__ = np.array(A_ )
if input_labels is not None:
A__ = np.array(A_ )
if input_boxes is not None:
if len(A_ ) != len(A_ ):
A__ = [
self._normalize_coordinates(self.target_size , A_ , original_sizes[0] , is_bounding_box=A_ )
for box in input_boxes
]
else:
A__ = [
self._normalize_coordinates(self.target_size , A_ , A_ , is_bounding_box=A_ )
for box, original_size in zip(A_ , A_ )
]
A__ = np.array(A_ )
if input_boxes is not None:
if return_tensors == "pt":
A__ = torch.from_numpy(A_ )
# boxes batch size of 1 by default
A__ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
A__ = tf.convert_to_tensor(A_ )
# boxes batch size of 1 by default
A__ = tf.expand_dims(A_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
A__ = torch.from_numpy(A_ )
# point batch size of 1 by default
A__ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
A__ = tf.convert_to_tensor(A_ )
# point batch size of 1 by default
A__ = tf.expand_dims(A_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
A__ = torch.from_numpy(A_ )
# point batch size of 1 by default
A__ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
A__ = tf.convert_to_tensor(A_ )
# point batch size of 1 by default
A__ = tf.expand_dims(A_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def _pad_points_and_labels( self , input_points , input_labels ):
    """Pad every entry to the same number of points, padding the labels alongside."""
    expected_nb_points = max([point.shape[0] for point in input_points] )
    processed_input_points = []
    for i, point in enumerate(input_points ):
        if point.shape[0] != expected_nb_points:
            point = np.concatenate(
                [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
            input_labels[i] = np.append(input_labels[i] , [self.point_pad_value] )
        processed_input_points.append(point )
    input_points = processed_input_points
    return input_points, input_labels
def _normalize_coordinates( self , target_size : int , coords : np.ndarray , original_size , is_bounding_box : bool = False ):
    """Rescale (x, y) coordinates from the original image size to the resized image."""
    old_h , old_w = original_size
    new_h , new_w = self.image_processor._get_preprocess_shape(original_size , longest_edge=target_size )
    coords = deepcopy(coords ).astype(float )
    if is_bounding_box:
        coords = coords.reshape(-1 , 2 , 2 )
    coords[..., 0] = coords[..., 0] * (new_w / old_w)
    coords[..., 1] = coords[..., 1] * (new_h / old_h)
    if is_bounding_box:
        coords = coords.reshape(-1 , 4 )
    return coords
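# Illustration (hypothetical numbers): if a 480x640 image is resized so its
# longest edge becomes 1024, then new_w / old_w == 1024 / 640 == 1.6, and a
# point at (x=320, y=240) maps to (512, 384).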
def _check_and_preprocess_points( self , input_points=None , input_labels=None , input_boxes=None , ):
    """Validate user-provided points, labels and boxes and convert them to numpy arrays."""
    if input_points is not None:
        if hasattr(input_points , 'numpy' ):  # Checks for TF or Torch tensor
            input_points = input_points.numpy().tolist()
        if not isinstance(input_points , list ) or not isinstance(input_points[0] , list ):
            raise ValueError('Input points must be a list of list of floating points.' )
        input_points = [np.array(input_point ) for input_point in input_points]
    else:
        input_points = None
    if input_labels is not None:
        if hasattr(input_labels , 'numpy' ):
            input_labels = input_labels.numpy().tolist()
        if not isinstance(input_labels , list ) or not isinstance(input_labels[0] , list ):
            raise ValueError('Input labels must be a list of list integers.' )
        input_labels = [np.array(label ) for label in input_labels]
    else:
        input_labels = None
    if input_boxes is not None:
        if hasattr(input_boxes , 'numpy' ):
            input_boxes = input_boxes.numpy().tolist()
        if (
            not isinstance(input_boxes , list )
            or not isinstance(input_boxes[0] , list )
            or not isinstance(input_boxes[0][0] , list )
        ):
            raise ValueError('Input boxes must be a list of list of list of floating points.' )
        input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
    else:
        input_boxes = None
    return input_points, input_labels, input_boxes
@property
def _a ( self : List[str] ):
"""simple docstring"""
A__ = self.image_processor.model_input_names
return list(dict.fromkeys(A_ ) )
def _a ( self : List[Any] , *_snake_case : Optional[Any] , **_snake_case : Optional[Any] ):
"""simple docstring"""
return self.image_processor.post_process_masks(*A_ , **A_ )
| 9 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : List[str] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width( height : int , width : int , scale_factor : int = 8 ):
    '''Round each spatial dimension up to the next multiple of scale_factor**2, then map back to pixel space.'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
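# Minimal sketch (illustration only): with the default factor of 8, sizes that
# are exact multiples of 64 pass through, anything else is rounded up.
assert downscale_height_and_width(768, 768) == (96, 96)
assert downscale_height_and_width(770, 770) == (104, 104)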
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : str , A_ : UNetaDConditionModel , A_ : DDPMScheduler , A_ : VQModel , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=A_ , scheduler=A_ , movq=A_ , )
lowerCamelCase_ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__ ( self : List[Any] , A_ : Tuple , A_ : Dict , A_ : List[Any] , A_ : int , A_ : Any , A_ : Tuple ) -> Any:
"""simple docstring"""
if latents is None:
lowerCamelCase_ = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCamelCase_ = latents.to(A_ )
lowerCamelCase_ = latents * scheduler.init_noise_sigma
return latents
def a__ ( self : int , A_ : str=0 ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase_ = torch.device(f"""cuda:{gpu_id}""" )
lowerCamelCase_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A_ , A_ )
def a__ ( self : Tuple , A_ : Union[str, Any]=0 ) -> Dict:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowerCamelCase_ = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=A_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase_ , lowerCamelCase_ = cpu_offload_with_hook(A_ , A_ , prev_module_hook=A_ )
# We'll offload the last model manually.
lowerCamelCase_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(A_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A_ )
def __call__( self : List[Any] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , A_ : int = 512 , A_ : int = 512 , A_ : int = 100 , A_ : float = 4.0 , A_ : int = 1 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[str] = "pil" , A_ : bool = True , ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self._execution_device
lowerCamelCase_ = guidance_scale > 1.0
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.cat(A_ , dim=0 )
lowerCamelCase_ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(A_ , A_ ):
lowerCamelCase_ = torch.cat(A_ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase_ = image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_ = negative_image_embeds.repeat_interleave(A_ , dim=0 )
lowerCamelCase_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A_ )
self.scheduler.set_timesteps(A_ , device=A_ )
lowerCamelCase_ = self.scheduler.timesteps
lowerCamelCase_ = self.unet.config.in_channels
lowerCamelCase_ , lowerCamelCase_ = downscale_height_and_width(A_ , A_ , self.movq_scale_factor )
# create initial latent
lowerCamelCase_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(A_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ = {'image_embeds': image_embeds}
lowerCamelCase_ = self.unet(
sample=A_ , timestep=A_ , encoder_hidden_states=A_ , added_cond_kwargs=A_ , return_dict=A_ , )[0]
if do_classifier_free_guidance:
noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
_ , variance_pred_text = variance_pred.chunk(2 )
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
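# Classifier-free guidance (two lines above): extrapolate from the
# unconditional prediction toward the conditioned one by guidance_scale,
# then re-attach the predicted variance channels.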
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(
A_ , A_ , A_ , generator=A_ , )[0]
# post-processing
lowerCamelCase_ = self.movq.decode(A_ , force_not_quantize=A_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCamelCase_ = image * 0.5 + 0.5
lowerCamelCase_ = image.clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
| 70 | 0 |
'''simple docstring'''
from maths.prime_check import is_prime
def next_twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin prime pair, otherwise -1."""
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
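# Minimal usage sketch (illustration only): 5 and 7 are twin primes; 8 is not prime.
assert next_twin_prime(5) == 7
assert next_twin_prime(8) == -1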
| 697 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    '''Binarize a grayscale image in place around the mean of all its pixel values.'''
    width , height = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 70 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 219 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
lowerCamelCase : List[Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
lowerCamelCase : Tuple = (
subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode("utf-8").split()
)
lowerCamelCase : Tuple = "|".join(sys.argv[1:])
lowerCamelCase : Any = re.compile(rF"""^({joined_dirs}).*?\.py$""")
lowerCamelCase : List[str] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 70 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
__lowerCAmelCase = """swinv2"""
__lowerCAmelCase = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , snake_case_=224 , snake_case_=4 , snake_case_=3 , snake_case_=96 , snake_case_=[2, 2, 6, 2] , snake_case_=[3, 6, 12, 24] , snake_case_=7 , snake_case_=4.0 , snake_case_=True , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_="gelu" , snake_case_=False , snake_case_=0.0_2 , snake_case_=1e-5 , snake_case_=32 , **snake_case_ , ):
'''simple docstring'''
super().__init__(**A_ )
__UpperCAmelCase: List[str] = image_size
__UpperCAmelCase: str = patch_size
__UpperCAmelCase: Optional[Any] = num_channels
__UpperCAmelCase: Any = embed_dim
__UpperCAmelCase: List[str] = depths
__UpperCAmelCase: Optional[int] = len(A_ )
__UpperCAmelCase: str = num_heads
__UpperCAmelCase: List[Any] = window_size
__UpperCAmelCase: Optional[Any] = mlp_ratio
__UpperCAmelCase: str = qkv_bias
__UpperCAmelCase: Optional[Any] = hidden_dropout_prob
__UpperCAmelCase: Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase: Union[str, Any] = drop_path_rate
__UpperCAmelCase: Union[str, Any] = hidden_act
__UpperCAmelCase: Union[str, Any] = use_absolute_embeddings
__UpperCAmelCase: List[Any] = layer_norm_eps
__UpperCAmelCase: Dict = initializer_range
__UpperCAmelCase: Tuple = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__UpperCAmelCase: Optional[int] = int(embed_dim * 2 ** (len(A_ ) - 1) )
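# e.g. with the defaults embed_dim=96 and len(depths) == 4 this gives
# hidden_size = 96 * 2**3 = 768 (a worked instance of the line above).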
__UpperCAmelCase: Union[str, Any] = (0, 0, 0, 0) | 523 |
import argparse
import json
import subprocess
def get_runner_status(target_runners , token):
    '''Query the GitHub API and fail if any of the target self-hosted runners is offline.'''
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE)
    o = output.stdout.decode('utf-8')
    status = json.loads(o)
    runners = status['runners']
    for runner in runners:
        if runner['name'] in target_runners:
            if runner['status'] == 'offline':
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open('offline_runners.txt' , 'w') as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners])
        raise ValueError(f'The following runners are offline:\n{failed}')
if __name__ == "__main__":
def list_str(values):
    '''Split a comma-separated CLI string into a list.'''
    return values.split(',')
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
lowerCamelCase : Optional[int] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
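# Example invocation (hypothetical file name and runner labels):
#   python get_runner_status.py --target_runners runner-1,runner-2 --token $GITHUB_TOKEN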
| 70 | 0 |
def excel_title_to_column(column_title: str) -> int:
    '''Convert a spreadsheet column title (e.g. "AB") to its 1-based column number.'''
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26 , power)
        answer += value
        power += 1
        index -= 1
    return answer
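# Minimal usage sketch (illustration only): spreadsheet-style numbering,
# 'A' -> 1, 'Z' -> 26, 'AB' -> 28.
assert excel_title_to_column('AB') == 28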
if __name__ == "__main__":
from doctest import testmod
testmod() | 256 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def get_config(model_name):
    '''Build a BitConfig with ImageNet-1k labels for the given timm model name.'''
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset') , 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = 'std_conv' if 'bit' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key(name):
    '''Map a timm parameter name to its HuggingFace BiT equivalent.'''
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks' , 'layers')
    if "head.fc" in name:
        name = name.replace('head.fc' , 'classifier.1')
    if name.startswith('norm'):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
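# Illustration of the renaming above (hypothetical timm parameter name):
assert rename_key('blocks.0.conv') == 'bit.encoder.layers.0.conv'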
def prepare_img():
    '''Download the standard COCO test image of two cats.'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub=False):
    '''Copy a timm BiT checkpoint into a HuggingFace model and verify the outputs match.'''
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if 'head' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image , return_tensors='pt').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print('Logits:' , logits[0, :3])
    print('Predicted class:' , model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1e-3)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"""Pushing model {model_name} and processor to the hub""")
        model.push_to_hub(f"""ybelkada/{model_name}""")
        processor.push_to_hub(f"""ybelkada/{model_name}""")
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
lowerCamelCase : Optional[int] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 70 | 0 |
"""simple docstring"""
import functools
def min_distance_up_bottom(word1: str , word2: str) -> int:
    '''Levenshtein edit distance between word1 and word2 via cached top-down recursion.'''
    len_word1 = len(word1)
    len_word2 = len(word2)
    @functools.cache
    def min_distance(index1: int , index2: int) -> int:
        # if first word index overflows - delete all remaining of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index overflows - delete all remaining of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1 , index2) , 1 + min_distance(index1 , index2 + 1) , diff + min_distance(index1 + 1 , index2 + 1) , )
    return min_distance(0 , 0)
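# Minimal usage sketch (illustration only): the classic Levenshtein examples.
assert min_distance_up_bottom('kitten' , 'sitting') == 3
assert min_distance_up_bottom('' , 'abc') == 3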
if __name__ == "__main__":
import doctest
doctest.testmod()
| 674 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A:
'''simple docstring'''
def __init__( self : List[Any] , A_ : Dict , A_ : Union[str, Any]=13 , A_ : List[Any]=30 , A_ : Optional[Any]=2 , A_ : List[str]=3 , A_ : List[str]=True , A_ : Dict=True , A_ : List[Any]=32 , A_ : Any=2 , A_ : Any=4 , A_ : Optional[int]=37 , A_ : Dict="gelu" , A_ : List[Any]=0.1 , A_ : Optional[int]=0.1 , A_ : Union[str, Any]=10 , A_ : Optional[Any]=0.02 , A_ : List[Any]=3 , A_ : str=None , ) -> str:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase_ = (image_size // patch_size) ** 2
lowerCamelCase_ = num_patches + 1
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , )
def a__ ( self : Any , A_ : int , A_ : int , A_ : int ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFViTModel(config=A_ )
lowerCamelCase_ = model(A_ , training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
lowerCamelCase_ = self.image_size // 2
lowerCamelCase_ = pixel_values[:, :, :image_size, :image_size]
lowerCamelCase_ = model(A_ , interpolate_pos_encoding=A_ , training=A_ )
lowerCamelCase_ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def a__ ( self : List[Any] , A_ : List[Any] , A_ : Any , A_ : Any ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = TFViTForImageClassification(A_ )
lowerCamelCase_ = model(A_ , labels=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
lowerCamelCase_ = self.image_size // 2
lowerCamelCase_ = pixel_values[:, :, :image_size, :image_size]
lowerCamelCase_ = model(A_ , interpolate_pos_encoding=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = TFViTForImageClassification(A_ )
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFViTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Layer ) )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=A_ , return_tensors='tf' )
# forward pass
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , A_ , atol=1E-4 )
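# Note on the checks above: passing `interpolate_pos_encoding=True` lets ViT
# accept images whose resolution differs from training by resizing the learned
# position embeddings, which is why the sequence length becomes
# (image_size // patch_size) ** 2 + 1 for the smaller crop.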
| 70 | 0 |
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    width , height = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(height):
        for i in range(width):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
lowerCamelCase_ = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
| 418 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None):
    '''Create a nested list of random floats with the given (batch, length) shape.'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , A_ : Dict , A_ : int=7 , A_ : str=400 , A_ : Dict=2000 , A_ : List[Any]=24 , A_ : List[Any]=24 , A_ : int=0.0 , A_ : Dict=16000 , A_ : List[Any]=True , A_ : str=True , ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = min_seq_length
lowerCamelCase_ = max_seq_length
lowerCamelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCamelCase_ = feature_size
lowerCamelCase_ = num_mel_bins
lowerCamelCase_ = padding_value
lowerCamelCase_ = sampling_rate
lowerCamelCase_ = return_attention_mask
lowerCamelCase_ = do_normalize
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def a__ ( self : List[Any] , A_ : str=False , A_ : Union[str, Any]=False ) -> str:
"""simple docstring"""
def _flatten(A_ : List[Any] ):
return list(itertools.chain(*A_ ) )
if equal_length:
lowerCamelCase_ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCamelCase_ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCamelCase_ = [np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = Speech2TextFeatureExtractor if is_speech_available() else None
def a__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.feat_extract_tester = Speech2TextFeatureExtractionTester(self )
def a__ ( self : str , A_ : Dict ) -> Dict:
"""simple docstring"""
self.assertTrue(np.all(np.mean(A_ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ , axis=0 ) - 1 ) < 1E-3 ) )
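# The helper above checks utterance-level cepstral mean and variance
# normalization: over the un-padded frames, each feature dimension should be
# approximately zero-mean with unit variance.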
def a__ ( self : int ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase_ = feature_extractor(A_ , padding=A_ , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
lowerCamelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
lowerCamelCase_ = feature_extractor(A_ , return_tensors='np' ).input_features
lowerCamelCase_ = feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase_ = np.asarray(A_ )
lowerCamelCase_ = feature_extractor(A_ , return_tensors='np' ).input_features
lowerCamelCase_ = feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase_ = [None, 16, None]
for max_length, padding in zip(A_ , A_ ):
lowerCamelCase_ = feature_extractor(
A_ , padding=A_ , max_length=A_ , return_attention_mask=A_ )
lowerCamelCase_ = inputs.input_features
lowerCamelCase_ = inputs.attention_mask
lowerCamelCase_ = [np.sum(A_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = ['longest', 'max_length', 'do_not_pad']
lowerCamelCase_ = [None, 16, None]
for max_length, padding in zip(A_ , A_ ):
lowerCamelCase_ = feature_extractor(
A_ , max_length=A_ , padding=A_ , return_tensors='np' , return_attention_mask=A_ )
lowerCamelCase_ = inputs.input_features
lowerCamelCase_ = inputs.attention_mask
lowerCamelCase_ = [np.sum(A_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feature_extractor(
A_ , padding='max_length' , max_length=4 , truncation=A_ , return_tensors='np' , return_attention_mask=A_ , )
lowerCamelCase_ = inputs.input_features
lowerCamelCase_ = inputs.attention_mask
lowerCamelCase_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feature_extractor(
A_ , padding='longest' , max_length=4 , truncation=A_ , return_tensors='np' , return_attention_mask=A_ , )
lowerCamelCase_ = inputs.input_features
lowerCamelCase_ = inputs.attention_mask
lowerCamelCase_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
lowerCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowerCamelCase_ = feature_extractor(
A_ , padding='longest' , max_length=16 , truncation=A_ , return_tensors='np' , return_attention_mask=A_ , )
lowerCamelCase_ = inputs.input_features
lowerCamelCase_ = inputs.attention_mask
lowerCamelCase_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def a__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float32 )
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
    np_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
    self.assertTrue(np_processed.input_features.dtype == np.float32 )
    pt_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
    self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def a__ ( self : List[str] , A_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
from datasets import load_dataset
lowerCamelCase_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def a__ ( self : str ) -> Tuple:
"""simple docstring"""
expected = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
input_speech = self._load_datasamples(1 )
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
input_features = feature_extractor(input_speech , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , expected , atol=1E-4 ) )
| 70 | 0 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    '''Numerically stable softmax over the last axis.'''
    maxes = np.max(outputs , axis=-1 , keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True)
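# Minimal sketch (illustration only): subtracting the row max keeps np.exp from
# overflowing for large logits, and each row still sums to 1.
assert np.allclose(softmax(np.array([[1000.0, 1001.0]])).sum(axis=-1) , 1.0)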
class _snake_case( UpperCAmelCase ):
def _UpperCamelCase (self : Tuple , **a : str ) -> str:
"""simple docstring"""
A__ = {}
if "second_text" in kwargs:
A__ = kwargs['second_text']
return preprocess_kwargs, {}, {}
def _UpperCamelCase (self : Union[str, Any] , a : List[str] , a : int=None ) -> str:
"""simple docstring"""
return self.tokenizer(A_ , text_pair=A_ , return_tensors=self.framework )
def _UpperCamelCase (self : List[str] , a : int ) -> Optional[Any]:
"""simple docstring"""
return self.model(**A_ )
def _UpperCamelCase (self : Optional[Any] , a : Tuple ) -> Optional[Any]:
"""simple docstring"""
A__ = model_outputs.logits[0].numpy()
A__ = softmax(A_ )
A__ = np.argmax(A_ )
A__ = self.model.config.id2label[best_class]
A__ = probabilities[best_class].item()
A__ = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 531 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = TransfoXLTokenizer
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
super().setUp()
lowerCamelCase_ = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('<unk> UNwanted , running' )
        self.assertListEqual(tokens , ['<unk>', 'unwanted', ',', 'running'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
    def test_full_tokenizer_lower( self ):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
    def test_full_tokenizer_no_lower( self ):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_full_tokenizer_moses_numbers( self ):
        """simple docstring"""
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
    def test_move_added_token( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
        tokenizer.add_tokens(['new1', 'new2'] )
        tokenizer.move_added_token('new1' , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('new1' ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , 'new1' )
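# Editor's note: a self-contained sketch of move_added_token (the behavior asserted in
# the last test above); the vocab contents here are illustrative.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        sketch_vocab = os.path.join(tmp , VOCAB_FILES_NAMES['vocab_file'] )
        with open(sketch_vocab , 'w' , encoding='utf-8' ) as f:
            f.write('<unk>\nhello\nworld\n' )
        tok = TransfoXLTokenizer(vocab_file=sketch_vocab , lower_case=True )
        tok.add_tokens(['brand_new'] )
        tok.move_added_token('brand_new' , 1 )  # relocated, not duplicated
        assert tok.encode('brand_new' ) == [1]
        assert tok.decode([1] ) == 'brand_new'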
| 70 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowerCamelCase : Any = "hf-internal-testing/tiny-random-bert"
lowerCamelCase : Optional[int] = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
lowerCamelCase : str = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class A__ ( unittest.TestCase ):
def A ( self : List[Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =cached_file(A_ , A_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(A_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(A_ , A_ ) ) )
with open(os.path.join(A_ , 'refs' , 'main' ) ) as f:
_SCREAMING_SNAKE_CASE =f.read()
self.assertEqual(A_ , os.path.join(A_ , 'snapshots' , A_ , A_ ) )
self.assertTrue(os.path.isfile(A_ ) )
# File is cached at the same place the second time.
_SCREAMING_SNAKE_CASE =cached_file(A_ , A_ )
self.assertEqual(A_ , A_ )
# Using a specific revision to test the full commit hash.
_SCREAMING_SNAKE_CASE =cached_file(A_ , A_ , revision='9b8c223' )
self.assertEqual(A_ , os.path.join(A_ , 'snapshots' , A_ , A_ ) )
def A ( self : Tuple ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(A_ , 'is not a valid model identifier' ):
_SCREAMING_SNAKE_CASE =cached_file('tiny-random-bert' , A_ )
with self.assertRaisesRegex(A_ , 'is not a valid git identifier' ):
_SCREAMING_SNAKE_CASE =cached_file(A_ , A_ , revision='aaaa' )
with self.assertRaisesRegex(A_ , 'does not appear to have a file named' ):
_SCREAMING_SNAKE_CASE =cached_file(A_ , 'conf' )
def A ( self : Dict ) -> str:
'''simple docstring'''
with self.assertRaisesRegex(A_ , 'does not appear to have a file named' ):
_SCREAMING_SNAKE_CASE =cached_file(A_ , 'conf' )
with open(os.path.join(A_ , 'refs' , 'main' ) ) as f:
_SCREAMING_SNAKE_CASE =f.read()
self.assertTrue(os.path.isfile(os.path.join(A_ , '.no_exist' , A_ , 'conf' ) ) )
_SCREAMING_SNAKE_CASE =cached_file(A_ , 'conf' , _raise_exceptions_for_missing_entries=A_ )
self.assertIsNone(A_ )
_SCREAMING_SNAKE_CASE =cached_file(A_ , 'conf' , local_files_only=A_ , _raise_exceptions_for_missing_entries=A_ )
self.assertIsNone(A_ )
_SCREAMING_SNAKE_CASE =mock.Mock()
_SCREAMING_SNAKE_CASE =500
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =HTTPError
_SCREAMING_SNAKE_CASE ={}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=A_ ) as mock_head:
_SCREAMING_SNAKE_CASE =cached_file(A_ , 'conf' , _raise_exceptions_for_connection_errors=A_ )
self.assertIsNone(A_ )
# This check we did call the fake head request
mock_head.assert_called()
def A ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , A_ ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , A_ ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , A_ ) )
def A ( self : Dict ) -> List[str]:
'''simple docstring'''
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(A_ , 'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' , A_ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(A_ , 'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' , A_ , revision='ahaha' )
_SCREAMING_SNAKE_CASE =get_file_from_repo('bert-base-cased' , A_ )
# The name is the cached name which is not very easy to test, so instead we load the content.
_SCREAMING_SNAKE_CASE =json.loads(open(A_ , 'r' ).read() )
self.assertEqual(config['hidden_size'] , 768 )
def A ( self : Dict ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_SCREAMING_SNAKE_CASE =Path(A_ ) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(A_ , 'a.txt' ) , str(A_ ) )
self.assertIsNone(get_file_from_repo(A_ , 'b.txt' ) )
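# Editor's note: minimal sketch of the two lookup helpers exercised above. By default
# cached_file raises for a missing file, while get_file_from_repo returns None.
if __name__ == "__main__":
    resolved = cached_file('hf-internal-testing/tiny-random-bert' , CONFIG_NAME )
    print(resolved )  # local snapshot path of config.json
    print(get_file_from_repo('hf-internal-testing/tiny-random-bert' , 'missing.txt' ) )  # None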
| 405 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ):
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }
    def get_expected_values( self , image_inputs , batched=False ):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = DetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = DetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'do_rescale' ) )
self.assertTrue(hasattr(A_ , 'rescale_factor' ) )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , A_ )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
pass
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'image_id': 39769, 'annotations': target}
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
lowerCamelCase_ = image_processing(images=A_ , annotations=A_ , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) )
@slow
def a__ ( self : str ) -> Any:
"""simple docstring"""
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
lowerCamelCase_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
lowerCamelCase_ = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) )
# verify masks
lowerCamelCase_ = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , A_ )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) )
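# Editor's note: the shortest-edge resize rule that get_expected_values mirrors, as a
# standalone sketch (DetrImageProcessor applies the same rule internally when the
# longest-edge cap is not hit).
def shortest_edge_resize(height , width , shortest_edge=18 ):
    if width < height:
        return int(shortest_edge * height / width ), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height )
    return shortest_edge, shortest_edge
assert shortest_edge_resize(400 , 200 ) == (36, 18)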
| 70 | 0 |
'''simple docstring'''
def valid_coloring( neighbours: list[int] , colored_vertices: list[int] , color: int ) -> bool:
    """simple docstring"""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color( graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ) -> bool:
    """simple docstring"""
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color( graph: list[list[int]] , max_colors: int ) -> list[int]:
    """simple docstring"""
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
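# Example usage: 3-coloring a 5-vertex adjacency matrix with the backtracking solver
# above; an empty list means no valid coloring exists with the given color budget.
if __name__ == "__main__":
    graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(graph , 3 ) )  # [0, 1, 0, 1, 0]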
| 284 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config( PretrainedConfig ):
    '''simple docstring'''
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
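# Editor's note: quick usage check for the config restored above; hidden_size is
# derived from embed_dim and the number of stages at the end of __init__.
if __name__ == "__main__":
    config = Swinv2Config(embed_dim=96 , depths=[2, 2, 6, 2] )
    assert config.hidden_size == 96 * 2 ** (len(config.depths ) - 1 )  # 768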
| 70 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (__A , __A , __A , unittest.TestCase ):
"""simple docstring"""
_a : Optional[Any] = StableUnCLIPImgaImgPipeline
_a : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_a : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_a : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_a : Any = frozenset([] )
def _a ( self ):
"""simple docstring"""
embedder_hidden_size = 32
embedder_projection_dim = embedder_hidden_size
# image encoding components
a_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
a_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=A_ , projection_dim=A_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
a_ = StableUnCLIPImageNormalizer(embedding_dim=A_ )
a_ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
a_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
a_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
a_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=A_ , layers_per_block=1 , upcast_attention=A_ , use_linear_projection=A_ , )
torch.manual_seed(0 )
a_ = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.00_085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=A_ , steps_offset=1 , )
torch.manual_seed(0 )
a_ = AutoencoderKL()
a_ = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def _a ( self , UpperCamelCase__ , UpperCamelCase__=0 , UpperCamelCase__=True ):
"""simple docstring"""
if str(A_ ).startswith('mps' ):
a_ = torch.manual_seed(A_ )
else:
a_ = torch.Generator(device=A_ ).manual_seed(A_ )
a_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
if pil_image:
a_ = input_image * 0.5 + 0.5
a_ = input_image.clamp(0 , 1 )
a_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a_ = DiffusionPipeline.numpy_to_pil(A_ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def _a ( self ):
"""simple docstring"""
a_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
a_ = self.get_dummy_components()
a_ = StableUnCLIPImgaImgPipeline(**A_ )
a_ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
a_ = self.get_dummy_inputs(A_ )
inputs.update({'image_embeds': None} )
a_ = sd_pipe(**A_ ).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a_ = np.array([0.3_872, 0.7_224, 0.5_601, 0.4_741, 0.6_872, 0.5_814, 0.4_636, 0.3_867, 0.5_078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _a ( self ):
"""simple docstring"""
a_ = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=A_ )
def _a ( self ):
"""simple docstring"""
a_ = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=A_ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=A_ )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ):
"""simple docstring"""
a_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
a_ = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.float16 )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ = torch.Generator(device='cpu' ).manual_seed(0 )
a_ = pipe(A_ , 'anime turle' , generator=A_ , output_type='np' )
a_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ , A_ )
def _a ( self ):
"""simple docstring"""
a_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
a_ = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.float16 )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ = torch.Generator(device='cpu' ).manual_seed(0 )
a_ = pipe(A_ , 'anime turle' , generator=A_ , output_type='np' )
a_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(A_ , A_ )
def _a ( self ):
"""simple docstring"""
a_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a_ = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.float16 )
a_ = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a_ = pipe(
A_ , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
a_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
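# Editor's note: a hedged sketch of the memory-saving setup the slow tests above rely
# on; the real diffusers class is StableUnCLIPImg2ImgPipeline (the import at the top
# of this file is garbled), and fp16 is assumed per the reference artifacts' names.
if __name__ == "__main__":
    from diffusers import StableUnCLIPImg2ImgPipeline

    pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
        'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.float16 )
    pipe.enable_attention_slicing()        # trade speed for lower peak memory
    pipe.enable_sequential_cpu_offload()   # keep modules on CPU until they run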
| 536 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest( unittest.TestCase ):
    '''simple docstring'''
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
        processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def get_qformer_tokenizer( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        """simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCamelCase_ = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
lowerCamelCase_ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
self.assertIsInstance(processor.qformer_tokenizer , A_ )
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(A_ , return_tensors='np' )
lowerCamelCase_ = processor(images=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = processor(text=A_ )
lowerCamelCase_ = tokenizer(A_ , return_token_type_ids=A_ )
lowerCamelCase_ = qformer_tokenizer(A_ , return_token_type_ids=A_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=A_ , images=A_ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def a__ ( self : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(A_ )
lowerCamelCase_ = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_qformer_tokenizer()
lowerCamelCase_ = InstructBlipProcessor(
tokenizer=A_ , image_processor=A_ , qformer_tokenizer=A_ )
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=A_ , images=A_ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
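# Editor's note: end-to-end usage sketch for the processor tested above; the checkpoint
# is an assumption (any InstructBLIP checkpoint exposes the same processor layout).
if __name__ == "__main__":
    processor = InstructBlipProcessor.from_pretrained('Salesforce/instructblip-flan-t5-xl' )
    dummy = Image.new('RGB' , (224, 224) )
    inputs = processor(text='Describe the image' , images=dummy , return_tensors='pt' )
    print(sorted(inputs.keys() ) )
    # ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']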
| 70 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'and 50 > y > - 5 found via simulated annealing: {local_max.score()}'
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
        f'{local_min.score()}'
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
        f'{local_max.score()}'
    )
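# Editor's note: the Metropolis-style acceptance rule from the inner loop above,
# isolated as a sketch. A non-improving move (change <= 0) is accepted with
# probability e**(change / current_temp), which shrinks as the temperature drops.
def acceptance_probability(change: float , current_temp: float ) -> float:
    if change > 0:  # improving moves are always taken
        return 1.0
    return math.e ** (change / current_temp )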
| 9 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline( Pipeline ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        if self.framework == "tf":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        requires_backends(self , 'vision' )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return {}, {}, postprocess_kwargs
    def __call__( self , *args , **kwargs ) -> Union[Predictions, List[Prediction]]:
        """simple docstring"""
        return super().__call__(*args , **kwargs )
    def preprocess( self , image ):
        """simple docstring"""
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors='pt' )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
        inputs['target_size'] = target_size
        return inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        target_size = model_inputs.pop('target_size' )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({'target_size': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        """simple docstring"""
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )
            scores , classes = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs['bbox'].squeeze(0 )]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
            ]
        return annotation
    def _get_bounding_box( self , box: "torch.Tensor" ) -> Dict[str, int]:
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
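# Editor's note: the standard entry point for the pipeline defined above; model id and
# example image URL are illustrative.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline('object-detection' , model='facebook/detr-resnet-50' )
    print(detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.9 ) )
    # [{'score': ..., 'label': 'cat', 'box': {'xmin': ..., 'ymin': ..., 'xmax': ..., 'ymax': ...}}, ...]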
| 70 | 0 |
'''simple docstring'''
def combination_sum_iv( n: int , array: list[int] , target: int ) -> int:
    """simple docstring"""
    def count_of_possible_combinations( target: int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n: int , array: list[int] , target: int ) -> int:
    """simple docstring"""
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n: int , array: list[int] , target: int ) -> int:
    """simple docstring"""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
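# Worked example for the three implementations above: with array = [1, 2, 5] and
# target = 5 there are 9 ordered combinations (1+1+1+1+1, 1+1+1+2 in 4 orders,
# 1+2+2 in 3 orders, and 5).
if __name__ == "__main__":
    assert combination_sum_iv(3 , [1, 2, 5] , 5 ) == 9
    assert combination_sum_iv_dp_array(3 , [1, 2, 5] , 5 ) == 9
    assert combination_sum_iv_bottom_up(3 , [1, 2, 5] , 5 ) == 9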
| 697 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 70 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched( videos: ImageInput ) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F'''Could not make batched video from {videos}''' )
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple =["pixel_values"]
def __init__( self ,_snake_case = True ,_snake_case = None ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = True ,_snake_case = None ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = True ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase_ : List[str] = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase_ : Optional[Any] = get_size_dict(_snake_case ,param_name="crop_size" )
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor
UpperCAmelCase_ : Dict = offset
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Any = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" in size:
UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(_snake_case ,size["shortest_edge"] ,default_to_square=_snake_case )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Optional[Any] = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Dict = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = True ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : int = image.astype(np.floataa )
if offset:
UpperCAmelCase_ : Any = image - (scale / 2)
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,):
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case )
if do_resize:
UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case )
if do_center_crop:
UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case )
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case )
if do_normalize:
UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case )
UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case )
return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
| 71 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =VideoToVideoSDPipeline
__A : Tuple =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
__A : Union[str, Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
__A : str =PipelineTesterMixin.required_optional_params - {"latents"}
__A : Dict =False
# No `output_type`.
__A : Optional[int] =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
UpperCAmelCase_ : int = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="gelu" ,projection_dim=5_12 ,)
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_snake_case )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
# 3 frames
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 3, 32, 32) ,rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Tuple = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.get_dummy_components()
UpperCAmelCase_ : str = VideoToVideoSDPipeline(**_snake_case )
UpperCAmelCase_ : int = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = "np"
UpperCAmelCase_ : Dict = sd_pipe(**_snake_case ).frames
UpperCAmelCase_ : Tuple = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase_ : Dict = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ,expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" ,torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : int = torch.randn((1, 10, 3, 10_24, 5_76) ,generator=_snake_case )
UpperCAmelCase_ : List[Any] = video.to("cuda" )
UpperCAmelCase_ : List[Any] = "Spiderman is surfing"
UpperCAmelCase_ : Optional[Any] = pipe(_snake_case ,video=_snake_case ,generator=_snake_case ,num_inference_steps=3 ,output_type="pt" ).frames
UpperCAmelCase_ : Any = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 71 | 1 |
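For reference, the device-aware seeding idiom used in the get_dummy_inputs helper above can be factored out. A minimal sketch, assuming only that MPS backends lack device-bound generators; the helper name is illustrative, not part of the test suite:

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    # MPS does not support torch.Generator(device=...), so fall back to
    # seeding the global RNG; torch.manual_seed also returns a Generator.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)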
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
UpperCAmelCase_ : list[list[str]] = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
UpperCAmelCase_ : Any = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(_SCREAMING_SNAKE_CASE ) <= key:
return input_string
for position, character in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = position % (lowest * 2) # puts it in bounds
UpperCAmelCase_ : Optional[Any] = min(_SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = ["".join(_SCREAMING_SNAKE_CASE ) for row in temp_grid]
UpperCAmelCase_ : int = "".join(_SCREAMING_SNAKE_CASE )
return output_string
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Optional[Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
UpperCAmelCase_ : list[list[str]] = [[] for _ in range(_SCREAMING_SNAKE_CASE )] # generates template
for position in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Union[str, Any] = position % (lowest * 2) # puts it in bounds
UpperCAmelCase_ : int = min(_SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
UpperCAmelCase_ : str = 0
for row in temp_grid: # fills in the characters
UpperCAmelCase_ : List[Any] = input_string[counter : counter + len(_SCREAMING_SNAKE_CASE )]
grid.append(list(_SCREAMING_SNAKE_CASE ) )
counter += len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = "" # reads as zigzag
for position in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Union[str, Any] = position % (lowest * 2) # puts it in bounds
UpperCAmelCase_ : Optional[Any] = min(_SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> dict[int, str]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = {}
for key_guess in range(1 , len(_SCREAMING_SNAKE_CASE ) ): # tries every key
UpperCAmelCase_ : Any = decrypt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 |
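A de-obfuscated sketch of the encrypt function above, with the placeholder argument names resolved to the rail-fence (zigzag) algorithm it implements; the bounce-index arithmetic is unchanged:

def encrypt(input_string: str, key: int) -> str:
    # Assign each character to a rail by bouncing between rails 0 and key - 1,
    # then read the rails top to bottom.
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    lowest = key - 1
    grid: list[list[str]] = [[] for _ in range(key)]
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)      # puts it in bounds
        num = min(num, lowest * 2 - num)   # creates the zigzag pattern
        grid[num].append(character)
    return "".join("".join(row) for row in grid)

# encrypt("Hello, World!", 3) == "Hoo!el,Wrdl l"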
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_lowerCamelCase = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
inspect_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
"""simple docstring"""
inspect_metric(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_config_names(_SCREAMING_SNAKE_CASE )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Any = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ : Optional[Any] = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_split_names(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
| 71 | 1 |
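Outside pytest, the same inspection helpers can be called directly. A brief sketch; the dataset name is an example and network access is required:

from datasets import get_dataset_config_names, get_dataset_split_names

configs = get_dataset_config_names("squad")                        # e.g. ['plain_text']
splits = get_dataset_split_names("squad", config_name=configs[0])
print(configs, splits)                                             # e.g. ['plain_text'] ['train', 'validation']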
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _snake_case (enum.Enum):
__A : Tuple =0
__A : List[Any] =1
__A : Optional[int] =2
@add_end_docstrings(__SCREAMING_SNAKE_CASE)
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : str ="\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self ,*_snake_case ,**_snake_case ):
super().__init__(*_snake_case ,**_snake_case )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
UpperCAmelCase_ : List[Any] = None
if self.model.config.prefix is not None:
UpperCAmelCase_ : int = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._sanitize_parameters(prefix=_snake_case ,**self._forward_params )
UpperCAmelCase_ : Optional[Any] = {**self._preprocess_params, **preprocess_params}
UpperCAmelCase_ : Tuple = {**self._forward_params, **forward_params}
def UpperCamelCase__ ( self ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,**_snake_case ,):
UpperCAmelCase_ : Tuple = {}
if prefix is not None:
UpperCAmelCase_ : Tuple = prefix
if prefix:
UpperCAmelCase_ : Tuple = self.tokenizer(
_snake_case ,padding=_snake_case ,add_special_tokens=_snake_case ,return_tensors=self.framework )
UpperCAmelCase_ : Dict = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
UpperCAmelCase_ : Dict = handle_long_generation
preprocess_params.update(_snake_case )
UpperCAmelCase_ : List[str] = generate_kwargs
UpperCAmelCase_ : List[str] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
UpperCAmelCase_ : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
UpperCAmelCase_ : int = ReturnType.TENSORS
if return_type is not None:
UpperCAmelCase_ : Dict = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase_ : Union[str, Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase_ : int = self.tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
if len(_snake_case ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
UpperCAmelCase_ : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase__ ( self ,*_snake_case ,**_snake_case ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True} )
return super()._parse_and_tokenize(*_snake_case ,**_snake_case )
def __call__( self ,_snake_case ,**_snake_case ):
return super().__call__(_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="" ,_snake_case=None ,**_snake_case ):
UpperCAmelCase_ : Tuple = self.tokenizer(
prefix + prompt_text ,padding=_snake_case ,add_special_tokens=_snake_case ,return_tensors=self.framework )
UpperCAmelCase_ : int = prompt_text
if handle_long_generation == "hole":
UpperCAmelCase_ : Optional[int] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
UpperCAmelCase_ : List[str] = generate_kwargs["max_new_tokens"]
else:
UpperCAmelCase_ : int = generate_kwargs.get("max_length" ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
UpperCAmelCase_ : Optional[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length" )
UpperCAmelCase_ : List[str] = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
UpperCAmelCase_ : Optional[int] = inputs["attention_mask"][:, -keep_length:]
return inputs
def UpperCamelCase__ ( self ,_snake_case ,**_snake_case ):
UpperCAmelCase_ : Optional[int] = model_inputs["input_ids"]
UpperCAmelCase_ : Optional[Any] = model_inputs.get("attention_mask" ,_snake_case )
# Allow empty prompts
if input_ids.shape[1] == 0:
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Dict = 1
else:
UpperCAmelCase_ : Optional[int] = input_ids.shape[0]
UpperCAmelCase_ : Tuple = model_inputs.pop("prompt_text" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
UpperCAmelCase_ : Tuple = generate_kwargs.pop("prefix_length" ,0 )
if prefix_length > 0:
UpperCAmelCase_ : Tuple = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
UpperCAmelCase_ : Any = generate_kwargs.get("max_length" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
UpperCAmelCase_ : int = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
UpperCAmelCase_ : int = self.model.generate(input_ids=_snake_case ,attention_mask=_snake_case ,**_snake_case )
UpperCAmelCase_ : int = generated_sequence.shape[0]
if self.framework == "pt":
UpperCAmelCase_ : Optional[Any] = generated_sequence.reshape(_snake_case ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase_ : Any = tf.reshape(_snake_case ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=ReturnType.FULL_TEXT ,_snake_case=True ):
UpperCAmelCase_ : int = model_outputs["generated_sequence"][0]
UpperCAmelCase_ : int = model_outputs["input_ids"]
UpperCAmelCase_ : List[Any] = model_outputs["prompt_text"]
UpperCAmelCase_ : Optional[int] = generated_sequence.numpy().tolist()
UpperCAmelCase_ : str = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
UpperCAmelCase_ : Dict = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
UpperCAmelCase_ : Any = self.tokenizer.decode(
_snake_case ,skip_special_tokens=_snake_case ,clean_up_tokenization_spaces=_snake_case ,)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
UpperCAmelCase_ : str = 0
else:
UpperCAmelCase_ : List[str] = len(
self.tokenizer.decode(
input_ids[0] ,skip_special_tokens=_snake_case ,clean_up_tokenization_spaces=_snake_case ,) )
if return_type == ReturnType.FULL_TEXT:
UpperCAmelCase_ : List[str] = prompt_text + text[prompt_length:]
else:
UpperCAmelCase_ : Tuple = text[prompt_length:]
UpperCAmelCase_ : Tuple = {"generated_text": all_text}
records.append(_snake_case )
return records
| 71 |
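From the user side, this class is normally reached through the pipeline factory rather than instantiated directly. A minimal sketch; the model name is an example:

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator("In 1991, the remains of", max_new_tokens=20, num_return_sequences=1)
print(outputs[0]["generated_text"])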
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : List[str] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCAmelCase_ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
UpperCAmelCase_ : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCAmelCase_ : Dict = {"unk_token": "<unk>"}
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(_snake_case ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(_snake_case ) )
UpperCAmelCase_ : Optional[Any] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,_snake_case )
with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp:
json.dump(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uint8 )]
UpperCAmelCase_ : Union[str, Any] = [Image.fromarray(np.moveaxis(_snake_case ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : str = self.get_rust_tokenizer()
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=_snake_case )
UpperCAmelCase_ : str = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,_snake_case )
self.assertIsInstance(processor_fast.tokenizer ,_snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,_snake_case )
self.assertIsInstance(processor_fast.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
UpperCAmelCase_ : Tuple = self.get_image_processor(do_normalize=_snake_case ,padding_value=1.0 )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=_snake_case ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Dict = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[int] = image_processor(_snake_case ,return_tensors="np" )
UpperCAmelCase_ : Any = processor(images=_snake_case ,return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Tuple = "lower newer"
UpperCAmelCase_ : Any = processor(text=_snake_case )
UpperCAmelCase_ : List[Any] = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = "lower newer"
UpperCAmelCase_ : List[str] = self.prepare_image_inputs()
UpperCAmelCase_ : str = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : int = processor.batch_decode(_snake_case )
UpperCAmelCase_ : int = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Optional[int] = "lower newer"
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Dict = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 71 | 1 |
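For comparison with the tiny fixtures above, a short sketch of CLIPProcessor usage against a published checkpoint; the checkpoint name is an example:

import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']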
'''simple docstring'''
import torch
from torch import nn
class _snake_case (nn.Module):
def __init__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case=1 ,_snake_case=False ):
super().__init__()
UpperCAmelCase_ : List[Any] = n_token
UpperCAmelCase_ : int = d_embed
UpperCAmelCase_ : Optional[Any] = d_proj
UpperCAmelCase_ : Tuple = cutoffs + [n_token]
UpperCAmelCase_ : List[str] = [0] + self.cutoffs
UpperCAmelCase_ : str = div_val
UpperCAmelCase_ : List[str] = self.cutoffs[0]
UpperCAmelCase_ : Optional[int] = len(self.cutoffs ) - 1
UpperCAmelCase_ : Tuple = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.zeros(self.n_clusters ,self.d_embed ) )
UpperCAmelCase_ : Union[str, Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
UpperCAmelCase_ : str = nn.ModuleList()
UpperCAmelCase_ : List[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case ,_snake_case ) ) )
else:
self.out_projs.append(_snake_case )
self.out_layers.append(nn.Linear(_snake_case ,_snake_case ) )
else:
for i in range(len(self.cutoffs ) ):
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase_ : Union[str, Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_snake_case ,_snake_case ) ) )
self.out_layers.append(nn.Linear(_snake_case ,r_idx - l_idx ) )
UpperCAmelCase_ : Tuple = keep_order
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
if proj is None:
UpperCAmelCase_ : int = nn.functional.linear(_snake_case ,_snake_case ,bias=_snake_case )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCAmelCase_ : Tuple = nn.functional.linear(_snake_case ,proj.t().contiguous() )
UpperCAmelCase_ : Dict = nn.functional.linear(_snake_case ,_snake_case ,bias=_snake_case )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=None ,_snake_case=False ):
if labels is not None:
# Shift so that tokens < n predict n
UpperCAmelCase_ : Optional[Any] = hidden[..., :-1, :].contiguous()
UpperCAmelCase_ : Any = labels[..., 1:].contiguous()
UpperCAmelCase_ : Dict = hidden.view(-1 ,hidden.size(-1 ) )
UpperCAmelCase_ : Union[str, Any] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
UpperCAmelCase_ : Optional[Any] = hidden.view(-1 ,hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCAmelCase_ : Any = self._compute_logit(_snake_case ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
if labels is not None:
UpperCAmelCase_ : Any = labels != -1_00
UpperCAmelCase_ : Any = torch.zeros_like(_snake_case ,dtype=hidden.dtype ,device=hidden.device )
UpperCAmelCase_ : Union[str, Any] = (
-nn.functional.log_softmax(_snake_case ,dim=-1 )[mask].gather(1 ,labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCAmelCase_ : Optional[int] = nn.functional.log_softmax(_snake_case ,dim=-1 )
else:
# construct weights and biases
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase_ : Any = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase_ : List[str] = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase_ : Optional[Any] = self.out_layers[i].weight
UpperCAmelCase_ : str = self.out_layers[i].bias
if i == 0:
UpperCAmelCase_ : Union[str, Any] = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
UpperCAmelCase_ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(_snake_case )
biases.append(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase_ : Optional[Any] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = nn.functional.log_softmax(_snake_case ,dim=1 )
if labels is None:
UpperCAmelCase_ : int = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCAmelCase_ : Any = torch.zeros_like(_snake_case ,dtype=hidden.dtype ,device=hidden.device )
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : Optional[Any] = [0] + self.cutoffs
for i in range(len(_snake_case ) - 1 ):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCAmelCase_ : int = (labels >= l_idx) & (labels < r_idx)
UpperCAmelCase_ : Any = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCAmelCase_ : Dict = labels.index_select(0 ,_snake_case ) - l_idx
UpperCAmelCase_ : List[Any] = head_logprob.index_select(0 ,_snake_case )
UpperCAmelCase_ : Optional[int] = hidden.index_select(0 ,_snake_case )
else:
UpperCAmelCase_ : Optional[Any] = hidden
if i == 0:
if labels is not None:
UpperCAmelCase_ : List[str] = head_logprob_i.gather(1 ,target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase_ : Optional[int] = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase_ : Union[str, Any] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : int = nn.functional.log_softmax(_snake_case ,dim=1 )
UpperCAmelCase_ : int = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCAmelCase_ : str = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 ,target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase_ : Union[str, Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCAmelCase_ : Optional[int] = logprob_i
if labels is not None:
if (hasattr(self ,"keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 ,_snake_case ,-logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def UpperCamelCase__ ( self ,_snake_case ):
if self.n_clusters == 0:
UpperCAmelCase_ : Dict = self._compute_logit(_snake_case ,self.out_layers[0].weight ,self.out_layers[0].bias ,self.out_projs[0] )
return nn.functional.log_softmax(_snake_case ,dim=-1 )
else:
# construct weights and biases
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase_ : Tuple = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase_ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase_ : str = self.out_layers[i].weight
UpperCAmelCase_ : str = self.out_layers[i].bias
if i == 0:
UpperCAmelCase_ : List[Any] = torch.cat([weight_i, self.cluster_weight] ,dim=0 )
UpperCAmelCase_ : List[str] = torch.cat([bias_i, self.cluster_bias] ,dim=0 )
weights.append(_snake_case )
biases.append(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase_ : List[Any] = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : str = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCAmelCase_ : List[Any] = nn.functional.log_softmax(_snake_case ,dim=1 )
UpperCAmelCase_ : Optional[int] = [0] + self.cutoffs
for i in range(len(_snake_case ) - 1 ):
UpperCAmelCase_ , UpperCAmelCase_ : int = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCAmelCase_ : Optional[Any] = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase_ : int = self._compute_logit(_snake_case ,_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : int = nn.functional.log_softmax(_snake_case ,dim=1 )
UpperCAmelCase_ : Tuple = head_logprob[:, -i] + tail_logprob_i
UpperCAmelCase_ : Union[str, Any] = logprob_i
return out
| 71 |
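The class above re-implements adaptive softmax with per-cluster projections for Transformer-XL. PyTorch ships a related building block, nn.AdaptiveLogSoftmaxWithLoss, which shows the same cutoff idea without the projection logic; sizes below are arbitrary:

import torch
from torch import nn

# Vocabulary of 1000 split into a head cluster and two tail clusters by the cutoffs.
adaptive = nn.AdaptiveLogSoftmaxWithLoss(in_features=64, n_classes=1000, cutoffs=[100, 500], div_value=2.0)
hidden = torch.randn(8, 64)
labels = torch.randint(0, 1000, (8,))
output = adaptive(hidden, labels)
print(output.loss)  # mean negative log-likelihood over the batch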
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechT5HifiGan,
SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Any =AudioLDMPipeline
__A : Dict =TEXT_TO_AUDIO_PARAMS
__A : Any =TEXT_TO_AUDIO_BATCH_PARAMS
__A : Tuple =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNet2DConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=(32, 64) ,class_embed_type="simple_projection" ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=_snake_case ,)
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ClapTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,projection_dim=32 ,)
UpperCAmelCase_ : Optional[Any] = ClapTextModelWithProjection(_snake_case )
UpperCAmelCase_ : List[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" ,model_max_length=77 )
UpperCAmelCase_ : Optional[int] = SpeechT5HifiGanConfig(
model_in_dim=8 ,sampling_rate=1_60_00 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=_snake_case ,)
UpperCAmelCase_ : Union[str, Any] = SpeechT5HifiGan(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Any = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : Any = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : int = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Dict = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Tuple = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[str] = output.audios[0]
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : str = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : Dict = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Tuple = F.normalize(_snake_case ,dim=-1 )
UpperCAmelCase_ : int = prompt_embeds
# forward
UpperCAmelCase_ : int = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"]
UpperCAmelCase_ : Any = negative_prompt
UpperCAmelCase_ : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Dict = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ : Any = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : List[Any] = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Any = F.normalize(_snake_case ,dim=-1 )
embeds.append(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = embeds
# forward
UpperCAmelCase_ : Tuple = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : int = "egg cracking"
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ,negative_prompt=_snake_case )
UpperCAmelCase_ : int = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : List[Any] = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Any = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Dict = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase_ : Any = audioldm_pipe(_snake_case ,num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Dict = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : List[Any] = audioldm_pipe(_snake_case ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 ,**_snake_case )
UpperCAmelCase_ : str = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
UpperCAmelCase_ : List[Any] = audioldm_pipe(audio_length_in_s=0.032 ,**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : str = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : int = ["hey"]
UpperCAmelCase_ : Dict = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : Any = output.audios.shape
assert audio_shape == (1, 2_56)
UpperCAmelCase_ : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase_ : List[Any] = SpeechT5HifiGan(_snake_case ).to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def UpperCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def UpperCamelCase__ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="cpu" ,_snake_case=torch.float32 ,_snake_case=0 ):
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : str = np.random.RandomState(_snake_case ).standard_normal((1, 8, 1_28, 16) )
UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_snake_case ).to(device=_snake_case ,dtype=_snake_case )
UpperCAmelCase_ : List[str] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : Optional[int] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[Any] = self.get_inputs(_snake_case )
UpperCAmelCase_ : List[Any] = 25
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Union[str, Any] = audio[7_72_30:7_72_40]
UpperCAmelCase_ : Any = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCAmelCase_ : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Any = audio[2_77_80:2_77_90]
UpperCAmelCase_ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCAmelCase_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 71 | 1 |
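A compact end-to-end sketch matching the slow tests above; it downloads the cvssp/audioldm checkpoint, so treat it as illustrative rather than a fast test:

import torch
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
generator = torch.Generator(device="cpu").manual_seed(0)
audio = pipe(
    "A hammer hitting a wooden surface",
    num_inference_steps=10,
    audio_length_in_s=5.0,
    generator=generator,
).audios[0]
print(audio.shape)  # 1-D waveform at the vocoder sampling rate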
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCAmelCase_ : Optional[Any] = {
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
UpperCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,_snake_case )
with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp:
json.dump(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return BertTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uint8 )]
UpperCAmelCase_ : List[Any] = [Image.fromarray(np.moveaxis(_snake_case ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Any = self.get_image_processor()
UpperCAmelCase_ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Dict = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Tuple = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
UpperCAmelCase_ : List[str] = self.get_image_processor(do_normalize=_snake_case ,padding_value=1.0 )
UpperCAmelCase_ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=_snake_case ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_image_processor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Dict = VisionTextDualEncoderProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[Any] = image_processor(_snake_case ,return_tensors="np" )
UpperCAmelCase_ : List[Any] = processor(images=_snake_case ,return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = VisionTextDualEncoderProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : List[str] = "lower newer"
UpperCAmelCase_ : Tuple = processor(text=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Dict = VisionTextDualEncoderProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : int = "lower newer"
UpperCAmelCase_ : Dict = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[Any] = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(_snake_case ):
processor()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : Any = VisionTextDualEncoderProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : int = processor.batch_decode(_snake_case )
UpperCAmelCase_ : Dict = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = self.get_image_processor()
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : str = "lower newer"
UpperCAmelCase_ : Optional[Any] = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[int] = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 71 |
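The save/load round trip exercised above, condensed into a standalone sketch; the checkpoint names are examples standing in for the temporary fixtures:

import tempfile
from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

with tempfile.TemporaryDirectory() as tmpdir:
    processor.save_pretrained(tmpdir)
    reloaded = VisionTextDualEncoderProcessor.from_pretrained(tmpdir)
    assert reloaded.tokenizer.get_vocab() == tokenizer.get_vocab()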
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 71 | 1 |
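The _LazyModule pattern above defers heavy imports until attribute access. A minimal stand-alone sketch of the same idea using importlib; this is illustrative, not the transformers implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    # Resolve symbols from submodules on first access instead of at import time.
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")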
'''simple docstring'''
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE : tuple[int, int] , _SCREAMING_SNAKE_CASE : int ) -> list[tuple[int, int]]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = position
UpperCAmelCase_ : List[str] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCAmelCase_ : List[str] = []
for position in positions:
UpperCAmelCase_ , UpperCAmelCase_ : Any = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_SCREAMING_SNAKE_CASE )
return permissible_positions
def a__ ( _SCREAMING_SNAKE_CASE : list[list[int]] ) -> bool:
"""simple docstring"""
return not any(elem == 0 for row in board for elem in row )
def a__ ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : tuple[int, int] , _SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
if is_complete(_SCREAMING_SNAKE_CASE ):
return True
for position in get_valid_pos(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = position
if board[y][x] == 0:
UpperCAmelCase_ : int = curr + 1
if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , curr + 1 ):
return True
UpperCAmelCase_ : int = 0
return False
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> list[list[int]]:
"""simple docstring"""
UpperCAmelCase_ : str = [[0 for i in range(_SCREAMING_SNAKE_CASE )] for j in range(_SCREAMING_SNAKE_CASE )]
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = 1
if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , (i, j) , 1 ):
return board
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Union[str, Any] = F'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 |
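A de-obfuscated condensation of the backtracking tour above, with the same move set and depth-first strategy compressed into one function:

from __future__ import annotations

def open_knight_tour(n: int) -> list[list[int]] | None:
    # Try every starting square; backtrack when no knight move extends the tour.
    moves = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    board = [[0] * n for _ in range(n)]

    def solve(y: int, x: int, step: int) -> bool:
        board[y][x] = step
        if step == n * n:
            return True
        for dy, dx in moves:
            ny, nx = y + dy, x + dx
            if 0 <= ny < n and 0 <= nx < n and board[ny][nx] == 0 and solve(ny, nx, step + 1):
                return True
        board[y][x] = 0  # undo and backtrack
        return False

    for i in range(n):
        for j in range(n):
            if solve(i, j, 1):
                return board
    return None  # signal failure with None instead of raising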
'''simple docstring'''
import heapq
def a__ ( _SCREAMING_SNAKE_CASE : dict ) -> set[int]:
"""simple docstring"""
UpperCAmelCase_ : list[list] = []
# for each node and its adjacency list, add them and the rank of the node to the queue
# using the heapq module, the queue is filled like a priority queue
# heapq implements a min-priority queue, so -1*len(v) is used to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_SCREAMING_SNAKE_CASE , [-1 * len(_SCREAMING_SNAKE_CASE ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase_ : Optional[int] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase_ : Tuple = heapq.heappop(_SCREAMING_SNAKE_CASE )[1][0]
chosen_vertices.add(_SCREAMING_SNAKE_CASE )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
UpperCAmelCase_ : Any = elem[1][1].index(_SCREAMING_SNAKE_CASE )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_SCREAMING_SNAKE_CASE )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = """▁"""
_lowerCamelCase = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
}
_lowerCamelCase = {
"""vocab_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
),
},
"""spm_file""": {
"""facebook/s2t-small-librispeech-asr""": (
"""https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
)
},
}
_lowerCamelCase = {
"""facebook/s2t-small-librispeech-asr""": 1024,
}
_lowerCamelCase = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]
_lowerCamelCase = {"""mustc""": MUSTC_LANGS}
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple =VOCAB_FILES_NAMES
__A : List[Any] =PRETRAINED_VOCAB_FILES_MAP
__A : List[Any] =MAX_MODEL_INPUT_SIZES
__A : Optional[Any] =["input_ids", "attention_mask"]
__A : List[int] =[]
def __init__( self ,_snake_case ,_snake_case ,_snake_case="<s>" ,_snake_case="</s>" ,_snake_case="<pad>" ,_snake_case="<unk>" ,_snake_case=False ,_snake_case=False ,_snake_case=None ,_snake_case=None ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_snake_case ,eos_token=_snake_case ,unk_token=_snake_case ,pad_token=_snake_case ,do_upper_case=_snake_case ,do_lower_case=_snake_case ,tgt_lang=_snake_case ,lang_codes=_snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**_snake_case ,)
UpperCAmelCase_ : Optional[Any] = do_upper_case
UpperCAmelCase_ : Dict = do_lower_case
UpperCAmelCase_ : List[str] = load_json(_snake_case )
UpperCAmelCase_ : List[Any] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : str = spm_file
UpperCAmelCase_ : Any = load_spm(_snake_case ,self.sp_model_kwargs )
if lang_codes is not None:
UpperCAmelCase_ : Optional[Any] = lang_codes
UpperCAmelCase_ : int = LANGUAGES[lang_codes]
UpperCAmelCase_ : Optional[Any] = [f'''<lang:{lang}>''' for lang in self.langs]
UpperCAmelCase_ : List[Any] = {lang: self.sp_model.PieceToId(f'''<lang:{lang}>''' ) for lang in self.langs}
UpperCAmelCase_ : int = self.lang_tokens
UpperCAmelCase_ : Optional[Any] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
UpperCAmelCase_ : List[Any] = {}
@property
def UpperCamelCase__ ( self ):
return len(self.encoder )
@property
def UpperCamelCase__ ( self ):
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Optional[int] = new_tgt_lang
self.set_tgt_lang_special_tokens(_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Optional[int] = self.lang_code_to_id[tgt_lang]
UpperCAmelCase_ : str = [lang_code_id]
def UpperCamelCase__ ( self ,_snake_case ):
return self.sp_model.encode(_snake_case ,out_type=_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
return self.encoder.get(_snake_case ,self.encoder[self.unk_token] )
def UpperCamelCase__ ( self ,_snake_case ):
return self.decoder.get(_snake_case ,self.unk_token )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : List[Any] = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
UpperCAmelCase_ : Tuple = self.sp_model.decode(_snake_case )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
UpperCAmelCase_ : Optional[int] = []
else:
current_sub_tokens.append(_snake_case )
UpperCAmelCase_ : Optional[Any] = self.sp_model.decode(_snake_case )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case ,token_ids_a=_snake_case ,already_has_special_tokens=_snake_case )
UpperCAmelCase_ : Union[str, Any] = [1] * len(self.prefix_tokens )
UpperCAmelCase_ : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_snake_case )) + suffix_ones
return prefix_ones + ([0] * len(_snake_case )) + ([0] * len(_snake_case )) + suffix_ones
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
UpperCAmelCase_ : Tuple = self.__dict__.copy()
UpperCAmelCase_ : List[str] = None
return state
def __setstate__( self ,_snake_case ):
UpperCAmelCase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self ,"sp_model_kwargs" ):
UpperCAmelCase_ : List[Any] = {}
UpperCAmelCase_ : Dict = load_spm(self.spm_file ,self.sp_model_kwargs )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ):
UpperCAmelCase_ : Union[str, Any] = Path(_snake_case )
assert save_dir.is_dir(), f'''{save_directory} should be a directory'''
UpperCAmelCase_ : Any = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
UpperCAmelCase_ : Union[str, Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder ,_snake_case )
if os.path.abspath(self.spm_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file ,_snake_case )
elif not os.path.isfile(self.spm_file ):
with open(_snake_case ,"wb" ) as fi:
UpperCAmelCase_ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (str(_snake_case ), str(_snake_case ))
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = sentencepiece.SentencePieceProcessor(**_SCREAMING_SNAKE_CASE )
spm.Load(str(_SCREAMING_SNAKE_CASE ) )
return spm
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Union[Dict, List]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , "r" ) as f:
return json.load(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : str ) -> None:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , "w" ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , indent=2 )
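# --- Added sketch (not in the original) of the special-token layout produced
# by the build-inputs method above: the target-language code comes first,
# then the token ids, then EOS. The concrete ids here are made up.
_prefix_tokens, _eos_token_id = [10], 2  # e.g. <lang:fr> and </s>
_token_ids = [5, 6, 7]
assert _prefix_tokens + _token_ids + [_eos_token_id] == [10, 5, 6, 7, 2]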
| 71 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple =["pixel_values"]
def __init__( self ,_snake_case = True ,_snake_case = None ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = True ,_snake_case = None ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = True ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase_ : List[str] = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase_ : Optional[Any] = get_size_dict(_snake_case ,param_name="crop_size" )
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor
UpperCAmelCase_ : Dict = offset
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Any = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" in size:
UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(_snake_case ,size["shortest_edge"] ,default_to_square=_snake_case )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Optional[Any] = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Dict = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = True ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : int = image.astype(np.floataa )
if offset:
UpperCAmelCase_ : Any = image - (scale / 2)
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,):
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case )
if do_resize:
UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case )
if do_center_crop:
UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case )
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case )
if do_normalize:
UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case )
UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case )
return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
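# --- Added simplified sketch (not in the original) of the `make_batched`
# normalization above, with the `is_valid_image` checks dropped: a single
# frame becomes [[frame]], a flat list of frames becomes [frames], and a
# list of lists is already a batch of videos.
def _make_batched_sketch(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)):
        return videos
    if isinstance(videos, (list, tuple)):
        return [videos]
    return [[videos]]

_frame = object()  # stand-in for a PIL image or numpy array
assert _make_batched_sketch(_frame) == [[_frame]]
assert _make_batched_sketch([_frame, _frame]) == [[_frame, _frame]]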
| 71 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
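# --- Added sketch (not in the original file) of the availability probe that
# gates the torch-only symbols above; it can be implemented roughly like this
# with importlib, so importing the package never hard-requires torch.
import importlib.util

def _is_torch_available_sketch() -> bool:
    return importlib.util.find_spec("torch") is not None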
| 71 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(
_snake_case ,split=_snake_case ,features=_snake_case ,cache_dir=_snake_case ,keep_in_memory=_snake_case ,streaming=_snake_case ,num_proc=_snake_case ,**_snake_case ,)
UpperCAmelCase_ : Tuple = field
UpperCAmelCase_ : List[Any] = path_or_paths if isinstance(_snake_case ,_snake_case ) else {self.split: path_or_paths}
UpperCAmelCase_ : Optional[int] = Json(
cache_dir=_snake_case ,data_files=_snake_case ,features=_snake_case ,field=_snake_case ,**_snake_case ,)
def UpperCamelCase__ ( self ):
# Build iterable dataset
if self.streaming:
UpperCAmelCase_ : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
self.builder.download_and_prepare(
download_config=_snake_case ,download_mode=_snake_case ,verification_mode=_snake_case ,base_path=_snake_case ,num_proc=self.num_proc ,)
UpperCAmelCase_ : Dict = self.builder.as_dataset(
split=self.split ,verification_mode=_snake_case ,in_memory=self.keep_in_memory )
return dataset
class _snake_case :
def __init__( self ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Union[str, Any] = path_or_buf
UpperCAmelCase_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase_ : Dict = num_proc
UpperCAmelCase_ : Optional[Any] = "utf-8"
UpperCAmelCase_ : Optional[int] = to_json_kwargs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.to_json_kwargs.pop("path_or_buf" ,_snake_case )
UpperCAmelCase_ : Tuple = self.to_json_kwargs.pop("orient" ,"records" )
UpperCAmelCase_ : Any = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False )
UpperCAmelCase_ : Optional[int] = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True )
UpperCAmelCase_ : int = self.to_json_kwargs.pop("compression" ,_snake_case )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"wb" ,compression=_snake_case ) as buffer:
UpperCAmelCase_ : List[str] = self._write(file_obj=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
" was passed. Please provide a local path instead." )
UpperCAmelCase_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
return written
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = args
UpperCAmelCase_ : List[str] = query_table(
table=self.dataset.data ,key=slice(_snake_case ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
UpperCAmelCase_ : Optional[Any] = batch.to_pandas().to_json(
path_or_buf=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**_snake_case )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ,):
UpperCAmelCase_ : Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
UpperCAmelCase_ : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_snake_case )
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,_snake_case ,_snake_case )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
written += file_obj.write(_snake_case )
return written
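# --- Added illustration (not in the original) of the pandas call at the core
# of the batch-to-JSON step above: with orient="records" and lines=True, each
# row becomes a JSON object on its own line with no trailing newline -- which
# is why the writer appends "\n" itself. Requires pandas.
import pandas as pd

_df = pd.DataFrame({"a": [1, 2]})
assert _df.to_json(orient="records", lines=True) == '{"a":1}\n{"a":2}'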
| 71 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _snake_case (__SCREAMING_SNAKE_CASE):
@staticmethod
@abstractmethod
def UpperCamelCase__ ( _snake_case ):
raise NotImplementedError()
@abstractmethod
def UpperCamelCase__ ( self ):
raise NotImplementedError()
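# --- Added hypothetical subclass (not in the original) showing how the two
# abstract hooks above are typically filled in: register an argparse
# subcommand in the static method, do the actual work in `run`.
from argparse import ArgumentParser

class _EnvCommandSketch:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("env")
        sub.set_defaults(func=lambda args: _EnvCommandSketch().run())

    def run(self):
        print("collect and print environment info")

_parser = ArgumentParser("cli")
_EnvCommandSketch.register_subcommand(_parser.add_subparsers())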
| 71 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Any =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Dict =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
| 71 | 1 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Any =(DPMSolverSDEScheduler,)
__A : Tuple =10
def UpperCamelCase__ ( self ,**_snake_case ):
UpperCAmelCase_ : List[Any] = {
"num_train_timesteps": 11_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**_snake_case )
return config
def UpperCamelCase__ ( self ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_snake_case )
def UpperCamelCase__ ( self ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] ,[0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_snake_case ,beta_end=_snake_case )
def UpperCamelCase__ ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_snake_case )
def UpperCamelCase__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase_ : Dict = self.get_scheduler_config()
UpperCAmelCase_ : Optional[int] = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase_ : str = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase_ : Optional[Any] = sample.to(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : List[str] = scheduler.scale_model_input(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = model(_snake_case ,_snake_case )
UpperCAmelCase_ : int = scheduler.step(_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = output.prev_sample
UpperCAmelCase_ : Optional[Any] = torch.sum(torch.abs(_snake_case ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase_ : Dict = self.get_scheduler_config(prediction_type="v_prediction" )
UpperCAmelCase_ : Union[str, Any] = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase_ : Tuple = sample.to(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : List[Any] = scheduler.scale_model_input(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = model(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = scheduler.step(_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = output.prev_sample
UpperCAmelCase_ : int = torch.sum(torch.abs(_snake_case ) )
UpperCAmelCase_ : Optional[int] = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.scheduler_classes[0]
UpperCAmelCase_ : Optional[int] = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**_snake_case )
scheduler.set_timesteps(self.num_inference_steps ,device=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter.to(_snake_case ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCAmelCase_ : str = scheduler.scale_model_input(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = model(_snake_case ,_snake_case )
UpperCAmelCase_ : int = scheduler.step(_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = output.prev_sample
UpperCAmelCase_ : str = torch.sum(torch.abs(_snake_case ) )
UpperCAmelCase_ : Optional[int] = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Union[str, Any] = scheduler_class(**_snake_case ,use_karras_sigmas=_snake_case )
scheduler.set_timesteps(self.num_inference_steps ,device=_snake_case )
UpperCAmelCase_ : Tuple = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter.to(_snake_case ) * scheduler.init_noise_sigma
UpperCAmelCase_ : Any = sample.to(_snake_case )
for t in scheduler.timesteps:
UpperCAmelCase_ : Dict = scheduler.scale_model_input(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = model(_snake_case ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = scheduler.step(_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = output.prev_sample
UpperCAmelCase_ : str = torch.sum(torch.abs(_snake_case ) )
UpperCAmelCase_ : Union[str, Any] = torch.mean(torch.abs(_snake_case ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
| 71 |
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ) -> tuple[float, float]:
"""simple docstring"""
if not len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = equationa
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = equationa
# Calculate the determinants of the matrices
UpperCAmelCase_ : Optional[int] = aa * ba - aa * ba
UpperCAmelCase_ : Optional[int] = ca * ba - ca * ba
UpperCAmelCase_ : Any = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (the system is consistent, with the unique solution x = y = 0)
return (0.0, 0.0)
else:
UpperCAmelCase_ : Optional[int] = determinant_x / determinant
UpperCAmelCase_ : List[Any] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
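# --- Added worked example (not in the original): the name collapsing above
# obscures the determinants, so here they are spelled out for the system
#   1x + 2y = 3
#   2x + 1y = 3
# with D = a1*b2 - a2*b1, Dx = c1*b2 - c2*b1, Dy = a1*c2 - a2*c1 and
# x = Dx/D, y = Dy/D.
a1, b1, c1 = 1, 2, 3
a2, b2, c2 = 2, 1, 3
d, dx, dy = a1 * b2 - a2 * b1, c1 * b2 - c2 * b1, a1 * c2 - a2 * c1  # -3, -3, -3
assert (dx / d, dy / d) == (1.0, 1.0)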
| 71 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = inspect.getfile(accelerate.test_utils )
UpperCAmelCase_ : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase_ : Any = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase_ : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def UpperCamelCase__ ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase_ : Union[str, Any] = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_snake_case ,env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase__ ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase_ : List[Any] = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_snake_case ,env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_snake_case ,env=os.environ.copy() )
@require_multi_gpu
def UpperCamelCase__ ( self ):
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
UpperCAmelCase_ : Any = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 ,cuda_visible_devices="0,1" ):
execute_subprocess_async(_snake_case ,env=os.environ.copy() )
if __name__ == "__main__":
_lowerCamelCase = Accelerator()
_lowerCamelCase = (accelerator.state.process_index + 2, 10)
_lowerCamelCase = torch.randint(0, 10, shape).to(accelerator.device)
_lowerCamelCase = """"""
_lowerCamelCase = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
_lowerCamelCase = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
_lowerCamelCase = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
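# --- Added single-process illustration (not in the original) of the padding
# contract the script above verifies: shorter ranks are zero-padded along
# dim 0 up to the longest tensor, either at the end (default) or at the
# front (pad_first=True). Requires torch.
import torch

_t = torch.ones(2, 4)
_padded = torch.zeros(3, 4)
_padded[: _t.shape[0]] = _t  # default: data first, zero padding at the end
assert torch.equal(_padded[:2], _t) and torch.all(_padded[2:] == 0)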
| 71 |
'''simple docstring'''
from statistics import mean, stdev
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Dict = min(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = max(_SCREAMING_SNAKE_CASE )
# normalize data
return [round((x - x_min) / (x_max - x_min) , _SCREAMING_SNAKE_CASE ) for x in data]
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Tuple = mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = stdev(_SCREAMING_SNAKE_CASE )
# standardize data
return [round((x - mu) / (sigma) , _SCREAMING_SNAKE_CASE ) for x in data]
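# --- Added worked example (not in the original) of the two rescalings above:
# min-max normalization maps data onto [0, 1]; z-score standardization
# centers it at mean 0 with unit (sample) standard deviation.
from statistics import mean, stdev

_data = [2.0, 4.0, 6.0]
_lo, _hi = min(_data), max(_data)
assert [round((x - _lo) / (_hi - _lo), 3) for x in _data] == [0.0, 0.5, 1.0]
_mu, _sigma = mean(_data), stdev(_data)
assert [round((x - _mu) / _sigma, 3) for x in _data] == [-1.0, 0.0, 1.0]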
| 71 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""PLBartTokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PLBartForCausalLM""",
"""PLBartForConditionalGeneration""",
"""PLBartForSequenceClassification""",
"""PLBartModel""",
"""PLBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 71 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase = 16
_lowerCamelCase = 32
def a__ ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase_ : Tuple = load_dataset("glue" , "mrpc" )
def tokenize_function(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ : Union[str, Any] = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_SCREAMING_SNAKE_CASE : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ : int = 8
else:
UpperCAmelCase_ : Optional[Any] = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE , padding="longest" , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase_ : Any = DataLoader(
tokenized_datasets["train"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets["validation"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase = mocked_dataloaders # noqa: F811
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _SCREAMING_SNAKE_CASE ) == "1":
UpperCAmelCase_ : Tuple = 2
# Initialize accelerator
UpperCAmelCase_ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : str = config["lr"]
UpperCAmelCase_ : Union[str, Any] = int(config["num_epochs"] )
UpperCAmelCase_ : Tuple = int(config["seed"] )
UpperCAmelCase_ : Union[str, Any] = int(config["batch_size"] )
UpperCAmelCase_ : List[str] = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_SCREAMING_SNAKE_CASE )
def inner_training_loop(_SCREAMING_SNAKE_CASE : List[str] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ : Dict = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ : int = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate scheduler
UpperCAmelCase_ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase_ : str = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.loss
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _SCREAMING_SNAKE_CASE )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCAmelCase_ : Tuple = parser.parse_args()
UpperCAmelCase_ : int = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
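# --- Added simplified sketch (not accelerate's actual implementation) of the
# behavior `find_executable_batch_size` provides above: retry the wrapped
# training function with a halved batch size whenever it hits an
# out-of-memory error.
def _find_executable_batch_size_sketch(function, starting_batch_size=128):
    def wrapper():
        batch_size = starting_batch_size
        while batch_size > 0:
            try:
                return function(batch_size)
            except RuntimeError as err:  # accelerate matches CUDA OOM messages more carefully
                if "out of memory" in str(err):
                    batch_size //= 2  # halve and retry
                else:
                    raise
        raise RuntimeError("No executable batch size found; reached zero.")
    return wrapper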
| 71 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : int =(DEISMultistepScheduler,)
__A : List[str] =(("num_inference_steps", 25),)
def UpperCamelCase__ ( self ,**_snake_case ):
UpperCAmelCase_ : Optional[Any] = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**_snake_case )
return config
def UpperCamelCase__ ( self ,_snake_case=0 ,**_snake_case ):
UpperCAmelCase_ : int = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Tuple = kwargs.pop("num_inference_steps" ,_snake_case )
UpperCAmelCase_ : List[str] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Dict = self.get_scheduler_config(**_snake_case )
UpperCAmelCase_ : List[str] = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
UpperCAmelCase_ : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
UpperCAmelCase_ : Any = scheduler_class.from_pretrained(_snake_case )
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
UpperCAmelCase_ : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = sample, sample
for t in range(_snake_case ,time_step + scheduler.config.solver_order + 1 ):
UpperCAmelCase_ : Tuple = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
UpperCAmelCase_ : Tuple = new_scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ,_snake_case=0 ,**_snake_case ):
UpperCAmelCase_ : List[str] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[Any] = kwargs.pop("num_inference_steps" ,_snake_case )
UpperCAmelCase_ : Tuple = self.dummy_sample
UpperCAmelCase_ : Optional[Any] = 0.1 * sample
UpperCAmelCase_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Optional[Any] = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
UpperCAmelCase_ : int = scheduler_class.from_pretrained(_snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : int = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ : Any = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
UpperCAmelCase_ : List[str] = new_scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ,_snake_case=None ,**_snake_case ):
if scheduler is None:
UpperCAmelCase_ : int = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config(**_snake_case )
UpperCAmelCase_ : Union[str, Any] = scheduler_class(**_snake_case )
UpperCAmelCase_ : Tuple = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**_snake_case )
UpperCAmelCase_ : Any = scheduler_class(**_snake_case )
UpperCAmelCase_ : Union[str, Any] = 10
UpperCAmelCase_ : List[Any] = self.dummy_model()
UpperCAmelCase_ : Optional[int] = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : Tuple = model(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = scheduler.step(_snake_case ,_snake_case ,_snake_case ).prev_sample
return sample
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[Any] = kwargs.pop("num_inference_steps" ,_snake_case )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Optional[Any] = self.get_scheduler_config()
UpperCAmelCase_ : Any = scheduler_class(**_snake_case )
UpperCAmelCase_ : Optional[Any] = self.dummy_sample
UpperCAmelCase_ : Union[str, Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(_snake_case ,"set_timesteps" ):
scheduler.set_timesteps(_snake_case )
elif num_inference_steps is not None and not hasattr(_snake_case ,"set_timesteps" ):
UpperCAmelCase_ : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
UpperCAmelCase_ : Optional[int] = scheduler.timesteps[5]
UpperCAmelCase_ : Optional[int] = scheduler.timesteps[6]
UpperCAmelCase_ : Tuple = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
UpperCAmelCase_ : int = scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def UpperCamelCase__ ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
UpperCAmelCase_ : Tuple = DEISMultistepScheduler(**self.get_scheduler_config() )
UpperCAmelCase_ : Union[str, Any] = self.full_loop(scheduler=_snake_case )
UpperCAmelCase_ : List[str] = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
UpperCAmelCase_ : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
UpperCAmelCase_ : int = DPMSolverMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase_ : str = UniPCMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase_ : int = DEISMultistepScheduler.from_config(scheduler.config )
UpperCAmelCase_ : Union[str, Any] = self.full_loop(scheduler=_snake_case )
UpperCAmelCase_ : int = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def UpperCamelCase__ ( self ):
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=_snake_case )
def UpperCamelCase__ ( self ):
self.check_over_configs(thresholding=_snake_case )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_snake_case ,prediction_type=_snake_case ,sample_max_value=_snake_case ,algorithm_type="deis" ,solver_order=_snake_case ,solver_type=_snake_case ,)
def UpperCamelCase__ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def UpperCamelCase__ ( self ):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_snake_case ,solver_type=_snake_case ,prediction_type=_snake_case ,algorithm_type=_snake_case ,)
UpperCAmelCase_ : Tuple = self.full_loop(
solver_order=_snake_case ,solver_type=_snake_case ,prediction_type=_snake_case ,algorithm_type=_snake_case ,)
assert not torch.isnan(_snake_case ).any(), "Samples have nan numbers"
def UpperCamelCase__ ( self ):
self.check_over_configs(lower_order_final=_snake_case )
self.check_over_configs(lower_order_final=_snake_case )
def UpperCamelCase__ ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=_snake_case ,time_step=0 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.full_loop()
UpperCAmelCase_ : Union[str, Any] = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.23916 ) < 1E-3
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(_snake_case ) )
assert abs(result_mean.item() - 0.091 ) < 1E-3
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(thresholding=_snake_case ,dynamic_thresholding_ratio=0 )
UpperCAmelCase_ : Union[str, Any] = scheduler_class(**_snake_case )
UpperCAmelCase_ : Optional[Any] = 10
UpperCAmelCase_ : Dict = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter.half()
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase_ : Optional[int] = model(_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = scheduler.step(_snake_case ,_snake_case ,_snake_case ).prev_sample
assert sample.dtype == torch.floataa
| 71 |
'''simple docstring'''
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_SCREAMING_SNAKE_CASE )
if n > 1:
factors.append(_SCREAMING_SNAKE_CASE )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
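# --- Added worked check (not in the original) of the trial division above:
# 360 factors as 2^3 * 3^2 * 5, returned in non-decreasing order.
_n, _i, _factors = 360, 2, []
while _i * _i <= _n:
    if _n % _i:
        _i += 1
    else:
        _n //= _i
        _factors.append(_i)
if _n > 1:
    _factors.append(_n)
assert _factors == [2, 2, 2, 3, 3, 5]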
| 71 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = ["""model.decoder.embed_positions.weights"""]
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if "emb" in name:
UpperCAmelCase_ : Tuple = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
UpperCAmelCase_ : Tuple = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
UpperCAmelCase_ : int = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
UpperCAmelCase_ : List[Any] = name.replace("linear1" , "fc1" )
if "linear2" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("linear2" , "fc2" )
if "norm1" in name:
UpperCAmelCase_ : List[Any] = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
UpperCAmelCase_ : Tuple = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
UpperCAmelCase_ : int = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCAmelCase_ : str = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def a__ ( _SCREAMING_SNAKE_CASE : OrderedDict , _SCREAMING_SNAKE_CASE : int ) -> Tuple[Dict, Dict]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = list(state_dict.keys() )
UpperCAmelCase_ : Optional[Any] = {}
for key in keys:
UpperCAmelCase_ : Optional[Any] = state_dict.pop(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = rename_keys(_SCREAMING_SNAKE_CASE )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCAmelCase_ : Optional[Any] = val[:hidden_size, :]
UpperCAmelCase_ : Union[str, Any] = val[hidden_size : 2 * hidden_size, :]
UpperCAmelCase_ : List[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCAmelCase_ : Tuple = val
else:
UpperCAmelCase_ : str = val
return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
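# A quick illustration of the mapping above (derived from the branches, no extra data assumed):
# decoder_config_from_checkpoint("medium") yields hidden_size=1536, ffn_dim=6144,
# num_hidden_layers=48 and num_attention_heads=24.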
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
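# A hedged usage sketch (the script filename is assumed, not given in the source):
# python convert_musicgen_checkpoint.py --checkpoint small --pytorch_dump_folder ./musicgen-small
# or directly from Python:
# convert_musicgen_checkpoint("small", pytorch_dump_folder="./musicgen-small")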
| 71 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 71 | 1 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    """Euclidean distance between two points a and b."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify `point` by majority vote among its k nearest training neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
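    # A hedged extra check (added here, not in the source): accuracy on the held-out split
    # produced by train_test_split above; the exact number varies with the random split.
    correct = sum(classifier(X_train, y_train, classes, x) == classes[t] for x, t in zip(X_test, y_test))
    print(f"held-out accuracy: {correct / len(X_test):.2f}")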
| 71 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for the given split ("train", "val" or "test")."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)
        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
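# A hedged example invocation (script name and data paths are illustrative, not from this repo):
# python finetune_trainer.py --model_name_or_path t5-small --data_dir ./cnn_dm \
#     --output_dir ./outputs --do_train --do_eval --predict_with_generate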
| 71 | 1 |
'''simple docstring'''
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using Python operators:
print((a / b) % p == (a * b ** (p - 2)) % p)
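# Why this works (a note added here, not in the original file): by Fermat's little theorem,
# b ** (p - 1) % p == 1 when p is prime and does not divide b, so b ** (p - 2) % p is the
# modular inverse of b. A quick check:
# assert (b * binary_exponentiation(b, p - 2, p)) % p == 1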
| 71 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 71 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, **kwargs, )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=return_tensors, )
        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt"):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})
        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad every point list to the size of the largest one, using the pad value (and pad the labels accordingly)."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0 )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False):
        """Rescale coordinates from the original image size to the resized (longest-edge) target size."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
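# A hedged usage sketch (the `image` variable and the point coordinates are illustrative):
# processor = SamProcessor(image_processor)
# inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")
# inputs["input_points"].shape  # -> (1, 1, 1, 2) after the batch-dim expansion above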
| 71 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density with mean mu and standard deviation sigma at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
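# A hedged sanity check of the density above: at the mean of a standard normal the value
# is 1 / sqrt(2 * pi):
# round(gaussian(0), 4)  # -> 0.3989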
| 71 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
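# A hedged instantiation sketch: the defaults above reproduce the base configuration, e.g.
# MarkupLMConfig().xpath_unit_hidden_size == 32 and MarkupLMConfig().max_depth == 50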
| 71 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
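# A brief note (added here, not in the original file): with mix_ratio = 0.5 the two
# transformer residuals are averaged before the skip connection, i.e.
# output = 0.5 * (e0 - x) + 0.5 * (e1 - x) + x, where x is the input hidden state.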
| 71 | 1 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """Return the least maximum cuboid side M such that the number of cuboids with integer
    sides up to M and an integer-length shortest surface path between opposite corners
    exceeds `limit`."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 71 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> None:
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
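# A hedged example (file names illustrative): given an input like
# {"benchmarks/foo": {"load_time": {"new": 1.23, "old": 1.50, "diff": -0.27}}}
# running `python format_json_to_md.py results.json report.md` emits a collapsible
# markdown section with one table per benchmark.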
| 71 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,):
UpperCAmelCase_ : Dict = UMTaModel(config=_snake_case ).get_decoder().to(_snake_case ).eval()
# first forward pass
UpperCAmelCase_ : Dict = model(_snake_case ,use_cache=_snake_case )
UpperCAmelCase_ : int = model(_snake_case )
UpperCAmelCase_ : Optional[int] = model(_snake_case ,use_cache=_snake_case )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) )
self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Dict = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# append to next input_ids and
UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
UpperCAmelCase_ : Dict = model(_snake_case )["last_hidden_state"]
UpperCAmelCase_ : Dict = model(_snake_case ,past_key_values=_snake_case )["last_hidden_state"]
# select random slice
UpperCAmelCase_ : str = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Tuple = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase_ : Any = output_from_past[:, 0, random_slice_idx].detach()
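# With past_key_values the model is fed only the new token, so position 0 of its
# output must line up with the last position of the full-sequence forward pass.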
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case ,_snake_case ,atol=1E-3 ) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,):
UpperCAmelCase_ : Dict = UMTaModel(config=_snake_case ).to(_snake_case ).half().eval()
UpperCAmelCase_ : List[str] = model(**_snake_case )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(_snake_case ).any().item() )
@require_torch
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[int] =(
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__A : Any =(UMTaForConditionalGeneration,) if is_torch_available() else ()
__A : str =(
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__A : List[Any] =True
__A : str =False
__A : List[str] =False
__A : int =True
__A : Tuple =True
# The small UMT5 model needs higher percentages for CPU/MP tests
__A : Optional[int] =[0.8, 0.9]
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ : Union[str, Any] = UMTaModel(config_and_inputs[0] ).to(_snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_snake_case ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,f'''{tmpdirname}/t5_test.onnx''' ,export_params=_snake_case ,opset_version=9 ,input_names=["input_ids", "decoder_input_ids"] ,)
@unittest.skipIf(torch_device == "cpu" ,"Cant do half precision" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ : str = config_and_inputs[0]
UpperCAmelCase_ : Optional[int] = UMTaForConditionalGeneration(_snake_case ).eval()
model.to(_snake_case )
UpperCAmelCase_ : Optional[Any] = {
"head_mask": torch.zeros(config.num_layers ,config.num_heads ,device=_snake_case ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers ,config.num_heads ,device=_snake_case ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers ,config.num_heads ,device=_snake_case ),
}
for attn_name, (name, mask) in zip(_snake_case ,head_masking.items() ):
UpperCAmelCase_ : int = {name: mask}
# Explicitly pass decoder_head_mask, as it is required by the T5 model when head_mask is specified
if name == "head_mask":
UpperCAmelCase_ : str = torch.ones(
config.num_decoder_layers ,config.num_heads ,device=_snake_case )
UpperCAmelCase_ : int = model.generate(
config_and_inputs[1]["input_ids"] ,num_beams=1 ,max_length=3 ,output_attentions=_snake_case ,return_dict_in_generate=_snake_case ,**_snake_case ,)
# We check decoder_attentions and cross_attentions only for the last generation step
UpperCAmelCase_ : Union[str, Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def UpperCamelCase__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case (unittest.TestCase):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" ,return_dict=_snake_case ).to(_snake_case )
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained("google/umt5-small" ,use_fast=_snake_case ,legacy=_snake_case )
UpperCAmelCase_ : Optional[int] = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
UpperCAmelCase_ : Tuple = tokenizer(_snake_case ,return_tensors="pt" ,padding=_snake_case ).input_ids
# fmt: off
UpperCAmelCase_ : Any = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(_snake_case ,_snake_case )
UpperCAmelCase_ : int = model.generate(input_ids.to(_snake_case ) )
UpperCAmelCase_ : int = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
UpperCAmelCase_ : Dict = tokenizer.batch_decode(_snake_case )
self.assertEqual(_snake_case ,_snake_case )
| 71 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[int] =DebertaVaTokenizer
__A : Union[str, Any] =DebertaVaTokenizerFast
__A : str =True
__A : List[str] =True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(_snake_case ,unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[Any] = "this is a test"
UpperCAmelCase_ : Optional[Any] = "this is a test"
return input_text, output_text
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = "<pad>"
UpperCAmelCase_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<pad>" )
self.assertEqual(vocab_keys[1] ,"<unk>" )
self.assertEqual(vocab_keys[-1] ,"[PAD]" )
self.assertEqual(len(_snake_case ) ,3_00_01 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = "This is a test"
UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89]
UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
# fmt: off
UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" )
UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" )
UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case )
UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,)
@slow
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
| 71 | 1 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
_lowerCamelCase = logging.get_logger(__name__)
# General docstring
_lowerCamelCase = """PoolFormerConfig"""
# Base docstring
_lowerCamelCase = """sail/poolformer_s12"""
_lowerCamelCase = [1, 512, 7, 7]
# Image classification docstring
_lowerCamelCase = """sail/poolformer_s12"""
_lowerCamelCase = """tabby, tabby cat"""
_lowerCamelCase = [
"""sail/poolformer_s12""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : bool = False ) -> Tuple:
"""simple docstring"""
if drop_prob == 0.0 or not training:
return input
UpperCAmelCase_ : Any = 1 - drop_prob
UpperCAmelCase_ : Dict = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
UpperCAmelCase_ : Dict = keep_prob + torch.rand(_SCREAMING_SNAKE_CASE , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
UpperCAmelCase_ : Optional[Any] = input.div(_SCREAMING_SNAKE_CASE ) * random_tensor
return output
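# Illustrative note (hedged, names assume the un-obfuscated `drop_path`): this is
# stochastic depth. Each sample's residual branch is zeroed with probability
# drop_prob during training, and survivors are rescaled by 1 / keep_prob so the
# expected activation is unchanged, e.g.
#   x = torch.ones(8, 64, 7, 7)
#   y = drop_path(x, drop_prob=0.25, training=True)  # ~25% of samples zeroed,
#   # survivors scaled by 1 / 0.75 so that E[y] == x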
class _snake_case (nn.Module):
def __init__( self ,_snake_case = None ):
super().__init__()
UpperCAmelCase_ : List[Any] = drop_prob
def UpperCamelCase__ ( self ,_snake_case ):
return drop_path(_snake_case ,self.drop_prob ,self.training )
def UpperCamelCase__ ( self ):
return "p={}".format(self.drop_prob )
class _snake_case (nn.Module):
def __init__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case=None ):
super().__init__()
UpperCAmelCase_ : int = patch_size if isinstance(_snake_case ,collections.abc.Iterable ) else (patch_size, patch_size)
UpperCAmelCase_ : Optional[int] = stride if isinstance(_snake_case ,collections.abc.Iterable ) else (stride, stride)
UpperCAmelCase_ : Union[str, Any] = padding if isinstance(_snake_case ,collections.abc.Iterable ) else (padding, padding)
UpperCAmelCase_ : Optional[int] = nn.Convad(_snake_case ,_snake_case ,kernel_size=_snake_case ,stride=_snake_case ,padding=_snake_case )
UpperCAmelCase_ : Optional[Any] = norm_layer(_snake_case ) if norm_layer else nn.Identity()
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = self.projection(_snake_case )
UpperCAmelCase_ : Any = self.norm(_snake_case )
return embeddings
class _snake_case (nn.GroupNorm):
def __init__( self ,_snake_case ,**_snake_case ):
super().__init__(1 ,_snake_case ,**_snake_case )
class _snake_case (nn.Module):
def __init__( self ,_snake_case ):
super().__init__()
UpperCAmelCase_ : List[Any] = nn.AvgPoolad(_snake_case ,stride=1 ,padding=pool_size // 2 ,count_include_pad=_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
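# PoolFormer token mixing: average pooling minus the identity; the residual
# connection in the surrounding block adds the input back in.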
return self.pool(_snake_case ) - hidden_states
class _snake_case (nn.Module):
def __init__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
super().__init__()
UpperCAmelCase_ : Optional[Any] = nn.Convad(_snake_case ,_snake_case ,1 )
UpperCAmelCase_ : int = nn.Convad(_snake_case ,_snake_case ,1 )
UpperCAmelCase_ : Tuple = PoolFormerDropPath(_snake_case )
if isinstance(config.hidden_act ,_snake_case ):
UpperCAmelCase_ : Dict = ACTaFN[config.hidden_act]
else:
UpperCAmelCase_ : List[Any] = config.hidden_act
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Optional[Any] = self.conva(_snake_case )
UpperCAmelCase_ : Tuple = self.act_fn(_snake_case )
UpperCAmelCase_ : Tuple = self.drop(_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.conva(_snake_case )
UpperCAmelCase_ : str = self.drop(_snake_case )
return hidden_states
class _snake_case (nn.Module):
def __init__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
super().__init__()
UpperCAmelCase_ : int = PoolFormerPooling(_snake_case )
UpperCAmelCase_ : Any = PoolFormerOutput(_snake_case ,_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = PoolFormerGroupNorm(_snake_case )
UpperCAmelCase_ : Tuple = PoolFormerGroupNorm(_snake_case )
# Stochastic depth (drop path) regularization, applied only during training
UpperCAmelCase_ : Optional[Any] = PoolFormerDropPath(_snake_case ) if drop_path > 0.0 else nn.Identity()
UpperCAmelCase_ : Union[str, Any] = config.use_layer_scale
if config.use_layer_scale:
UpperCAmelCase_ : str = nn.Parameter(
config.layer_scale_init_value * torch.ones((_snake_case) ) ,requires_grad=_snake_case )
UpperCAmelCase_ : List[Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_snake_case) ) ,requires_grad=_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
if self.use_layer_scale:
UpperCAmelCase_ : Dict = self.pooling(self.before_norm(_snake_case ) )
UpperCAmelCase_ : Tuple = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
UpperCAmelCase_ : Optional[int] = hidden_states + self.drop_path(_snake_case )
UpperCAmelCase_ : Optional[Any] = ()
UpperCAmelCase_ : List[str] = self.output(self.after_norm(_snake_case ) )
UpperCAmelCase_ : Any = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
UpperCAmelCase_ : List[str] = hidden_states + self.drop_path(_snake_case )
UpperCAmelCase_ : Any = (output,) + outputs
return outputs
else:
UpperCAmelCase_ : Tuple = self.drop_path(self.pooling(self.before_norm(_snake_case ) ) )
# First residual connection
UpperCAmelCase_ : List[Any] = pooling_output + hidden_states
UpperCAmelCase_ : Optional[int] = ()
# Second residual connection inside the PoolFormerOutput block
UpperCAmelCase_ : Optional[int] = self.drop_path(self.output(self.after_norm(_snake_case ) ) )
UpperCAmelCase_ : List[Any] = hidden_states + layer_output
UpperCAmelCase_ : Optional[Any] = (output,) + outputs
return outputs
class _snake_case (nn.Module):
def __init__( self ,_snake_case ):
super().__init__()
UpperCAmelCase_ : Any = config
# stochastic depth decay rule
UpperCAmelCase_ : Optional[Any] = [x.item() for x in torch.linspace(0 ,config.drop_path_rate ,sum(config.depths ) )]
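# e.g. with drop_path_rate=0.1 and 12 layers in total, the per-layer drop
# probabilities ramp linearly from 0.0 up to 0.1 with depth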
# patch embeddings
UpperCAmelCase_ : str = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] ,stride=config.strides[i] ,padding=config.padding[i] ,num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] ,hidden_size=config.hidden_sizes[i] ,) )
UpperCAmelCase_ : List[Any] = nn.ModuleList(_snake_case )
# Transformer blocks
UpperCAmelCase_ : int = []
UpperCAmelCase_ : List[Any] = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
UpperCAmelCase_ : str = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_snake_case ,num_channels=config.hidden_sizes[i] ,pool_size=config.pool_size ,hidden_size=config.hidden_sizes[i] ,intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) ,drop_path=dpr[cur + j] ,) )
blocks.append(nn.ModuleList(_snake_case ) )
UpperCAmelCase_ : List[Any] = nn.ModuleList(_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=False ,_snake_case=True ):
UpperCAmelCase_ : Tuple = () if output_hidden_states else None
UpperCAmelCase_ : str = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings ,self.block ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = layers
# Get patch embeddings from hidden_states
UpperCAmelCase_ : Optional[int] = embedding_layer(_snake_case )
# Send the embeddings through the blocks
for _, blk in enumerate(_snake_case ):
UpperCAmelCase_ : int = blk(_snake_case )
UpperCAmelCase_ : Optional[Any] = layer_outputs[0]
if output_hidden_states:
UpperCAmelCase_ : List[Any] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_snake_case ,hidden_states=_snake_case )
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : List[str] =PoolFormerConfig
__A : Dict ="poolformer"
__A : Any ="pixel_values"
__A : Optional[Any] =True
def UpperCamelCase__ ( self ,_snake_case ):
if isinstance(_snake_case ,(nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_snake_case ,nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=False ):
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : Any = value
_lowerCamelCase = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_lowerCamelCase = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , __SCREAMING_SNAKE_CASE , )
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ):
super().__init__(_snake_case )
UpperCAmelCase_ : str = config
UpperCAmelCase_ : str = PoolFormerEncoder(_snake_case )
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase__ ( self ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCamelCase__ ( self ,_snake_case = None ,_snake_case = None ,_snake_case = None ,):
UpperCAmelCase_ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
UpperCAmelCase_ : str = self.encoder(
_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case ,)
UpperCAmelCase_ : Tuple = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_snake_case ,hidden_states=encoder_outputs.hidden_states ,)
class _snake_case (nn.Module):
def __init__( self ,_snake_case ):
super().__init__()
UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size ,config.hidden_size )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Dict = self.dense(_snake_case )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , __SCREAMING_SNAKE_CASE , )
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ):
super().__init__(_snake_case )
UpperCAmelCase_ : List[Any] = config.num_labels
UpperCAmelCase_ : Any = PoolFormerModel(_snake_case )
# Final norm
UpperCAmelCase_ : Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
UpperCAmelCase_ : Optional[Any] = (
nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCamelCase__ ( self ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,):
UpperCAmelCase_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : Union[str, Any] = self.poolformer(
_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case ,)
UpperCAmelCase_ : Any = outputs[0]
UpperCAmelCase_ : str = self.classifier(self.norm(_snake_case ).mean([-2, -1] ) )
UpperCAmelCase_ : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCAmelCase_ : List[str] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCAmelCase_ : Optional[int] = "single_label_classification"
else:
UpperCAmelCase_ : Tuple = "multi_label_classification"
if self.config.problem_type == "regression":
UpperCAmelCase_ : str = MSELoss()
if self.num_labels == 1:
UpperCAmelCase_ : int = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
UpperCAmelCase_ : List[str] = loss_fct(_snake_case ,_snake_case )
elif self.config.problem_type == "single_label_classification":
UpperCAmelCase_ : Any = CrossEntropyLoss()
UpperCAmelCase_ : List[str] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCAmelCase_ : Optional[int] = BCEWithLogitsLoss()
UpperCAmelCase_ : Optional[int] = loss_fct(_snake_case ,_snake_case )
if not return_dict:
UpperCAmelCase_ : Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states )
| 71 |
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError("Input value must be an 'int' type" )
UpperCAmelCase_ : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
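# Example: for 32 (0b100000) the loop shifts six times, so the function returns 6;
# for positive inputs this matches Python's int.bit_length().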
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 1 |
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_lowerCamelCase = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int]=None ) -> List[str]:
"""simple docstring"""
require_version(deps[pkg] , _SCREAMING_SNAKE_CASE )
| 71 |
'''simple docstring'''
from math import factorial
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(_SCREAMING_SNAKE_CASE ) // (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k ))
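# Computes the binomial coefficient n! / (k! * (n - k)!),
# e.g. combinations(5, 2) == 10.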
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 71 | 1 |
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : list ) -> list:
"""simple docstring"""
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 , 0 , -1 ):
UpperCAmelCase_ : Optional[Any] = False
for j in range(_SCREAMING_SNAKE_CASE , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = unsorted[j - 1], unsorted[j]
UpperCAmelCase_ : Union[str, Any] = True
for j in range(_SCREAMING_SNAKE_CASE ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase_ , UpperCAmelCase_ : str = unsorted[j + 1], unsorted[j]
UpperCAmelCase_ : Optional[int] = True
if not swapped:
break
return unsorted
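# Each outer iteration makes a backward pass (carrying a small element toward the
# front) followed by a forward pass (bubbling the largest remaining element to the
# back), e.g. cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5].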
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
_lowerCamelCase = [int(item) for item in user_input.split(""",""")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 71 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =VideoToVideoSDPipeline
__A : Tuple =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
__A : Union[str, Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
__A : str =PipelineTesterMixin.required_optional_params - {"latents"}
__A : Dict =False
# No `output_type`.
__A : Optional[int] =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
UpperCAmelCase_ : int = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="gelu" ,projection_dim=5_12 ,)
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_snake_case )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
# 3 frames
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 3, 32, 32) ,rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Tuple = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.get_dummy_components()
UpperCAmelCase_ : str = VideoToVideoSDPipeline(**_snake_case )
UpperCAmelCase_ : int = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = "np"
UpperCAmelCase_ : Dict = sd_pipe(**_snake_case ).frames
UpperCAmelCase_ : Tuple = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase_ : Dict = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ,expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" ,torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : int = torch.randn((1, 10, 3, 10_24, 5_76) ,generator=_snake_case )
UpperCAmelCase_ : List[Any] = video.to("cuda" )
UpperCAmelCase_ : List[Any] = "Spiderman is surfing"
UpperCAmelCase_ : Optional[Any] = pipe(_snake_case ,video=_snake_case ,generator=_snake_case ,num_inference_steps=3 ,output_type="pt" ).frames
UpperCAmelCase_ : Any = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 71 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : torch.FloatTensor
__A : torch.FloatTensor
__A : Optional[torch.FloatTensor] =None
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__A : List[Any] =2
@register_to_config
def __init__( self ,_snake_case = 0.02 ,_snake_case = 1_00 ,_snake_case = 1.007 ,_snake_case = 80 ,_snake_case = 0.05 ,_snake_case = 50 ,):
# standard deviation of the initial noise distribution
UpperCAmelCase_ : List[str] = sigma_max
# settable values
UpperCAmelCase_ : int = None
UpperCAmelCase_ : np.IntTensor = None
UpperCAmelCase_ : torch.FloatTensor = None # sigma(t_i)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ):
return sample
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ):
UpperCAmelCase_ : Optional[Any] = num_inference_steps
UpperCAmelCase_ : Any = np.arange(0 ,self.num_inference_steps )[::-1].copy()
UpperCAmelCase_ : int = torch.from_numpy(_snake_case ).to(_snake_case )
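# Noise schedule in the style of Karras et al. (2022): the list below
# geometrically interpolates sigma^2 between sigma_max**2 and sigma_min**2
# across the inference steps.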
UpperCAmelCase_ : str = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
UpperCAmelCase_ : List[Any] = torch.tensor(_snake_case ,dtype=torch.floataa ,device=_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ):
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase_ : List[Any] = min(self.config.s_churn / self.num_inference_steps ,2**0.5 - 1 )
else:
UpperCAmelCase_ : Any = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase_ : int = self.config.s_noise * randn_tensor(sample.shape ,generator=_snake_case ).to(sample.device )
UpperCAmelCase_ : str = sigma + gamma * sigma
UpperCAmelCase_ : Dict = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = True ,):
UpperCAmelCase_ : Optional[int] = sample_hat + sigma_hat * model_output
UpperCAmelCase_ : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_snake_case ,derivative=_snake_case ,pred_original_sample=_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case = True ,):
UpperCAmelCase_ : List[Any] = sample_prev + sigma_prev * model_output
UpperCAmelCase_ : Union[str, Any] = (sample_prev - pred_original_sample) / sigma_prev
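# Heun's second-order correction: average the derivative at sigma_hat with the
# freshly computed derivative at sigma_prev before re-taking the step.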
UpperCAmelCase_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=_snake_case ,derivative=_snake_case ,pred_original_sample=_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ):
raise NotImplementedError()
| 71 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_lowerCamelCase = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
inspect_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
"""simple docstring"""
inspect_metric(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_config_names(_SCREAMING_SNAKE_CASE )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Any = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ : Optional[Any] = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_split_names(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
| 71 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def a__ ( _SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = image.size
UpperCAmelCase_ , UpperCAmelCase_ : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCAmelCase_ : Union[str, Any] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE ).astype(np.floataa ) / 255.0
UpperCAmelCase_ : Tuple = image[None].transpose(0 , 3 , 1 , 2 )
UpperCAmelCase_ : Dict = torch.from_numpy(_SCREAMING_SNAKE_CASE )
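# scale pixel values from [0, 1] to [-1, 1], the range the VQ-VAE expects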
return 2.0 * image - 1.0
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case ,_snake_case ,):
super().__init__()
self.register_modules(vqvae=_snake_case ,unet=_snake_case ,scheduler=_snake_case )
@torch.no_grad()
def __call__( self ,_snake_case = None ,_snake_case = 1 ,_snake_case = 1_00 ,_snake_case = 0.0 ,_snake_case = None ,_snake_case = "pil" ,_snake_case = True ,):
if isinstance(_snake_case ,PIL.Image.Image ):
UpperCAmelCase_ : Dict = 1
elif isinstance(_snake_case ,torch.Tensor ):
UpperCAmelCase_ : int = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_snake_case )}''' )
if isinstance(_snake_case ,PIL.Image.Image ):
UpperCAmelCase_ : Optional[int] = preprocess(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : Any = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCAmelCase_ : str = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCAmelCase_ : List[Any] = next(self.unet.parameters() ).dtype
UpperCAmelCase_ : int = randn_tensor(_snake_case ,generator=_snake_case ,device=self.device ,dtype=_snake_case )
UpperCAmelCase_ : List[Any] = image.to(device=self.device ,dtype=_snake_case )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_snake_case ,device=self.device )
UpperCAmelCase_ : Any = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase_ : Union[str, Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase_ : str = {}
if accepts_eta:
UpperCAmelCase_ : Dict = eta
for t in self.progress_bar(_snake_case ):
# concat latents and low resolution image in the channel dimension.
UpperCAmelCase_ : Optional[Any] = torch.cat([latents, image] ,dim=1 )
UpperCAmelCase_ : Tuple = self.scheduler.scale_model_input(_snake_case ,_snake_case )
# predict the noise residual
UpperCAmelCase_ : List[Any] = self.unet(_snake_case ,_snake_case ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : str = self.scheduler.step(_snake_case ,_snake_case ,_snake_case ,**_snake_case ).prev_sample
# decode the image latents with the VQVAE
UpperCAmelCase_ : List[Any] = self.vqvae.decode(_snake_case ).sample
UpperCAmelCase_ : Tuple = torch.clamp(_snake_case ,-1.0 ,1.0 )
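# map the decoded samples from [-1, 1] back to [0, 1] for image output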
UpperCAmelCase_ : Any = image / 2 + 0.5
UpperCAmelCase_ : str = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
UpperCAmelCase_ : Optional[Any] = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
| 71 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : List[str] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCAmelCase_ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
UpperCAmelCase_ : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCAmelCase_ : Dict = {"unk_token": "<unk>"}
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(_snake_case ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(_snake_case ) )
UpperCAmelCase_ : Optional[Any] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,_snake_case )
with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp:
json.dump(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uint8 )]
UpperCAmelCase_ : Union[str, Any] = [Image.fromarray(np.moveaxis(_snake_case ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : str = self.get_rust_tokenizer()
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=_snake_case )
UpperCAmelCase_ : str = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,_snake_case )
self.assertIsInstance(processor_fast.tokenizer ,_snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,_snake_case )
self.assertIsInstance(processor_fast.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
UpperCAmelCase_ : Tuple = self.get_image_processor(do_normalize=_snake_case ,padding_value=1.0 )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=_snake_case ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Dict = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[int] = image_processor(_snake_case ,return_tensors="np" )
UpperCAmelCase_ : Any = processor(images=_snake_case ,return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Tuple = "lower newer"
UpperCAmelCase_ : Any = processor(text=_snake_case )
UpperCAmelCase_ : List[Any] = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = "lower newer"
UpperCAmelCase_ : List[str] = self.prepare_image_inputs()
UpperCAmelCase_ : str = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : int = processor.batch_decode(_snake_case )
UpperCAmelCase_ : int = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Optional[int] = "lower newer"
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Dict = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 71 | 1 |
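For context, the processor under test bundles a tokenizer and an image processor behind one call; a short usage sketch, with the checkpoint id as an assumption:
import numpy as np
from PIL import Image
from transformers import CLIPProcessor
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
inputs = processor(text=["lower newer"], images=image, return_tensors="np", padding=True)
# The same three keys the tests above assert on:
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']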
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
_lowerCamelCase = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy( saved_model_path , strict , opset ):
"""simple docstring"""
saved_model = SavedModel()
onnx_ops = []
with open(os.path.join(REPO_PATH , "utils" , "tf_ops" , "onnx.json" ) ) as f:
onnx_opsets = json.load(f )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(i )] )
with open(saved_model_path , "rb" ) as f:
saved_model.ParseFromString(f.read() )
model_op_names = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
model_op_names = sorted(model_op_names )
incompatible_ops = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(op )
if strict and len(incompatible_ops ) > 0:
raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops ) )
elif len(incompatible_ops ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*incompatible_ops , sep="\n" )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 71 |
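A hedged usage note for the checker above; the script path is an assumption, while the positional argument order matches the __main__ block:
# From a shell at the repository root (script name assumed):
#   python utils/check_tf_ops.py --saved_model_path saved_model/model.pb --opset 12 --strict
# Or called directly with a placeholder path, mirroring onnx_compliancy(path, strict, opset):
onnx_compliancy("saved_model/model.pb", False, 12)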
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechT5HifiGan,
SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Any =AudioLDMPipeline
__A : Dict =TEXT_TO_AUDIO_PARAMS
__A : Any =TEXT_TO_AUDIO_BATCH_PARAMS
__A : Tuple =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNet2DConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=(32, 64) ,class_embed_type="simple_projection" ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=_snake_case ,)
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ClapTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,projection_dim=32 ,)
UpperCAmelCase_ : Optional[Any] = ClapTextModelWithProjection(_snake_case )
UpperCAmelCase_ : List[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" ,model_max_length=77 )
UpperCAmelCase_ : Optional[int] = SpeechT5HifiGanConfig(
model_in_dim=8 ,sampling_rate=1_60_00 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=_snake_case ,)
UpperCAmelCase_ : Union[str, Any] = SpeechT5HifiGan(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Any = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : Any = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : int = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Dict = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Tuple = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[str] = output.audios[0]
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : str = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : Dict = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Tuple = F.normalize(_snake_case ,dim=-1 )
UpperCAmelCase_ : int = prompt_embeds
# forward
UpperCAmelCase_ : int = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"]
UpperCAmelCase_ : Any = negative_prompt
UpperCAmelCase_ : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Dict = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ : Any = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : List[Any] = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Any = F.normalize(_snake_case ,dim=-1 )
embeds.append(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = embeds
# forward
UpperCAmelCase_ : Tuple = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : int = "egg cracking"
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ,negative_prompt=_snake_case )
UpperCAmelCase_ : int = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : List[Any] = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Any = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Dict = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase_ : Any = audioldm_pipe(_snake_case ,num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Dict = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : List[Any] = audioldm_pipe(_snake_case ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 ,**_snake_case )
UpperCAmelCase_ : str = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
UpperCAmelCase_ : List[Any] = audioldm_pipe(audio_length_in_s=0.032 ,**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : str = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : int = ["hey"]
UpperCAmelCase_ : Dict = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : Any = output.audios.shape
assert audio_shape == (1, 2_56)
UpperCAmelCase_ : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase_ : List[Any] = SpeechT5HifiGan(_snake_case ).to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def UpperCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def UpperCamelCase__ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="cpu" ,_snake_case=torch.floataa ,_snake_case=0 ):
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : str = np.random.RandomState(_snake_case ).standard_normal((1, 8, 1_28, 16) )
UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_snake_case ).to(device=_snake_case ,dtype=_snake_case )
UpperCAmelCase_ : List[str] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : Optional[int] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[Any] = self.get_inputs(_snake_case )
UpperCAmelCase_ : List[Any] = 25
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Union[str, Any] = audio[7_72_30:7_72_40]
UpperCAmelCase_ : Any = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCAmelCase_ : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Any = audio[2_77_80:2_77_90]
UpperCAmelCase_ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCAmelCase_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 71 | 1 |
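For context, a minimal end-to-end sketch of the pipeline these tests exercise; the checkpoint id comes from the slow tests themselves, everything else is illustrative:
import scipy.io.wavfile
from diffusers import AudioLDMPipeline
pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10, audio_length_in_s=5.0).audios[0]
# 16 kHz matches the vocoder sampling rate configured in the dummy components above.
scipy.io.wavfile.write("hammer.wav", rate=16000, data=audio)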
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple ="funnel"
__A : List[Any] ={
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self ,_snake_case=3_05_22 ,_snake_case=[4, 4, 4] ,_snake_case=None ,_snake_case=2 ,_snake_case=7_68 ,_snake_case=12 ,_snake_case=64 ,_snake_case=30_72 ,_snake_case="gelu_new" ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=0.0 ,_snake_case=0.1 ,_snake_case=None ,_snake_case=1E-9 ,_snake_case="mean" ,_snake_case="relative_shift" ,_snake_case=True ,_snake_case=True ,_snake_case=True ,**_snake_case ,):
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Dict = block_sizes
UpperCAmelCase_ : List[str] = [1] * len(_snake_case ) if block_repeats is None else block_repeats
assert len(_snake_case ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
UpperCAmelCase_ : Optional[Any] = num_decoder_layers
UpperCAmelCase_ : Tuple = d_model
UpperCAmelCase_ : Union[str, Any] = n_head
UpperCAmelCase_ : Union[str, Any] = d_head
UpperCAmelCase_ : Any = d_inner
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : Optional[Any] = hidden_dropout
UpperCAmelCase_ : int = attention_dropout
UpperCAmelCase_ : Optional[int] = activation_dropout
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : str = initializer_std
UpperCAmelCase_ : int = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
UpperCAmelCase_ : int = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
UpperCAmelCase_ : int = attention_type
UpperCAmelCase_ : Any = separate_cls
UpperCAmelCase_ : Optional[Any] = truncate_seq
UpperCAmelCase_ : Optional[int] = pool_q_only
super().__init__(**_snake_case )
@property
def UpperCamelCase__ ( self ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def UpperCamelCase__ ( self ,_snake_case ):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
@property
def UpperCamelCase__ ( self ):
return len(self.block_sizes )
@num_blocks.setter
def UpperCamelCase__ ( self ,_snake_case ):
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
| 71 |
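A minimal sketch of the configuration in use, tying back to the num_hidden_layers and num_blocks properties defined above (no checkpoint is downloaded):
from transformers import FunnelConfig, FunnelModel
config = FunnelConfig(block_sizes=[2, 2, 2], d_model=128, n_head=4, d_head=32, d_inner=512)
print(config.num_hidden_layers)  # 6, the sum of block_sizes
print(config.num_blocks)  # 3, the length of block_sizes
model = FunnelModel(config)  # randomly initialized from the config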
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 71 | 1 |
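The point of the _LazyModule indirection above is that importing the package stays cheap: the torch-backed modeling file is only loaded on first attribute access. A hedged sketch (assumes torch is installed):
import transformers.models.gpt_neox_japanese as gnj
config = gnj.GPTNeoXJapaneseConfig()  # resolves the configuration module on first access
model_cls = gnj.GPTNeoXJapaneseForCausalLM  # only now is the modeling module imported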
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
origin : torch.Tensor # [batch_size x 3]
x : torch.Tensor # [batch_size x 3]
y : torch.Tensor # [batch_size x 3]
z : torch.Tensor # [batch_size x 3]
width : int
height : int
x_fov : float
y_fov : float
shape : Tuple[int]
def UpperCamelCase__ ( self ):
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def resolution( self ):
return torch.from_numpy(np.array([self.width, self.height] ,dtype=np.float32 ) )
def fov( self ):
return torch.from_numpy(np.array([self.x_fov, self.y_fov] ,dtype=np.float32 ) )
def get_image_coords( self ):
UpperCAmelCase_ : Any = torch.arange(self.height * self.width )
UpperCAmelCase_ : Tuple = torch.stack(
[
pixel_indices % self.width,
torch.div(_snake_case ,self.width ,rounding_mode="trunc" ),
] ,axis=1 ,)
return coords
@property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ , *UpperCAmelCase_ : Optional[int] = self.shape
UpperCAmelCase_ : Dict = int(np.prod(_snake_case ) )
UpperCAmelCase_ : Optional[int] = self.get_image_coords()
UpperCAmelCase_ : List[Any] = torch.broadcast_to(coords.unsqueeze(0 ) ,[batch_size * inner_batch_size, *coords.shape] )
UpperCAmelCase_ : Dict = self.get_camera_rays(_snake_case )
UpperCAmelCase_ : int = rays.view(_snake_case ,inner_batch_size * self.height * self.width ,2 ,3 )
return rays
def get_camera_rays( self ,_snake_case ):
UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
UpperCAmelCase_ : Dict = coords.view(_snake_case ,-1 ,2 )
UpperCAmelCase_ : Optional[Any] = self.resolution()
UpperCAmelCase_ : Optional[Any] = self.fov()
UpperCAmelCase_ : Optional[int] = (flat.float() / (res - 1)) * 2 - 1
UpperCAmelCase_ : str = fracs * torch.tan(fov / 2 )
UpperCAmelCase_ : str = fracs.view(_snake_case ,-1 ,2 )
UpperCAmelCase_ : Optional[Any] = (
self.z.view(_snake_case ,1 ,3 )
+ self.x.view(_snake_case ,1 ,3 ) * fracs[:, :, :1]
+ self.y.view(_snake_case ,1 ,3 ) * fracs[:, :, 1:]
)
UpperCAmelCase_ : List[Any] = directions / directions.norm(dim=-1 ,keepdim=_snake_case )
UpperCAmelCase_ : str = torch.stack(
[
torch.broadcast_to(self.origin.view(_snake_case ,1 ,3 ) ,[batch_size, directions.shape[1], 3] ),
directions,
] ,dim=2 ,)
return rays.view(_snake_case ,*_snake_case ,2 ,3 )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin ,x=self.x ,y=self.y ,z=self.z ,width=_snake_case ,height=_snake_case ,x_fov=self.x_fov ,y_fov=self.y_fov ,)
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : Union[str, Any] = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
UpperCAmelCase_ : Tuple = np.array([np.sin(_SCREAMING_SNAKE_CASE ), np.cos(_SCREAMING_SNAKE_CASE ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
UpperCAmelCase_ : Optional[Any] = -z * 4
UpperCAmelCase_ : List[str] = np.array([np.cos(_SCREAMING_SNAKE_CASE ), -np.sin(_SCREAMING_SNAKE_CASE ), 0.0] )
UpperCAmelCase_ : Dict = np.cross(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
origins.append(_SCREAMING_SNAKE_CASE )
xs.append(_SCREAMING_SNAKE_CASE )
ys.append(_SCREAMING_SNAKE_CASE )
zs.append(_SCREAMING_SNAKE_CASE )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(_SCREAMING_SNAKE_CASE , axis=0 ) ).float() , x=torch.from_numpy(np.stack(_SCREAMING_SNAKE_CASE , axis=0 ) ).float() , y=torch.from_numpy(np.stack(_SCREAMING_SNAKE_CASE , axis=0 ) ).float() , z=torch.from_numpy(np.stack(_SCREAMING_SNAKE_CASE , axis=0 ) ).float() , width=_SCREAMING_SNAKE_CASE , height=_SCREAMING_SNAKE_CASE , x_fov=0.7 , y_fov=0.7 , shape=(1, len(_SCREAMING_SNAKE_CASE )) , )
| 71 |
'''simple docstring'''
import heapq
def a__ ( _SCREAMING_SNAKE_CASE : dict ) -> set[int]:
"""simple docstring"""
UpperCAmelCase_ : list[list] = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_SCREAMING_SNAKE_CASE , [-1 * len(_SCREAMING_SNAKE_CASE ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase_ : Optional[int] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
argmax = heapq.heappop(queue )[1][0]
chosen_vertices.add(argmax )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
index = elem[1][1].index(argmax )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(queue )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71 | 1 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3_14_45_98
def rms_speed_of_molecule( temperature : float , molar_mass : float ) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
temperature = 300
molar_mass = 28
vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 71 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple =["pixel_values"]
def __init__( self ,_snake_case = True ,_snake_case = None ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = True ,_snake_case = None ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = True ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase_ : List[str] = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase_ : Optional[Any] = get_size_dict(_snake_case ,param_name="crop_size" )
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor
UpperCAmelCase_ : Dict = offset
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Any = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" in size:
UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(_snake_case ,size["shortest_edge"] ,default_to_square=_snake_case )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Optional[Any] = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Dict = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = True ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : int = image.astype(np.float32 )
if offset:
UpperCAmelCase_ : Any = image - (scale / 2)
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,):
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case )
if do_resize:
UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case )
if do_center_crop:
UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case )
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case )
if do_normalize:
UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case )
UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case )
return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
| 71 | 1 |
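The processor above matches the shape of transformers' Vivit video processor (the offset rescale into [-1, 1] is its tell-tale); a usage sketch under that assumption:
import numpy as np
from transformers import VivitImageProcessor
processor = VivitImageProcessor()  # defaults: resize shortest edge to 256, center-crop to 224x224
video = [np.random.randint(0, 256, (3, 240, 320), dtype=np.uint8) for _ in range(8)]
inputs = processor(video, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 8, 3, 224, 224): batch, frames, channels, height, width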
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCamelCase = logging.get_logger(__name__)
# General docstring
_lowerCamelCase = """RegNetConfig"""
# Base docstring
_lowerCamelCase = """facebook/regnet-y-040"""
_lowerCamelCase = [1, 1088, 7, 7]
# Image classification docstring
_lowerCamelCase = """facebook/regnet-y-040"""
_lowerCamelCase = """tabby, tabby cat"""
_lowerCamelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class _snake_case (tf.keras.layers.Layer):
def __init__( self ,_snake_case ,_snake_case = 3 ,_snake_case = 1 ,_snake_case = 1 ,_snake_case = "relu" ,**_snake_case ,):
super().__init__(**_snake_case )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
UpperCAmelCase_ : Any = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
UpperCAmelCase_ : Dict = tf.keras.layers.Conv2D(
filters=_snake_case ,kernel_size=_snake_case ,strides=_snake_case ,padding="VALID" ,groups=_snake_case ,use_bias=_snake_case ,name="convolution" ,)
UpperCAmelCase_ : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" )
UpperCAmelCase_ : Dict = ACT2FN[activation] if activation is not None else tf.identity
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = self.convolution(self.padding(_snake_case ) )
UpperCAmelCase_ : Union[str, Any] = self.normalization(_snake_case )
UpperCAmelCase_ : str = self.activation(_snake_case )
return hidden_state
class _snake_case (tf.keras.layers.Layer):
def __init__( self ,_snake_case ,**_snake_case ):
super().__init__(**_snake_case )
UpperCAmelCase_ : Tuple = config.num_channels
UpperCAmelCase_ : Dict = TFRegNetConvLayer(
out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name="embedder" ,)
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Optional[Any] = shape_list(_snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCAmelCase_ : Tuple = tf.transpose(_snake_case ,perm=(0, 2, 3, 1) )
UpperCAmelCase_ : Optional[Any] = self.embedder(_snake_case )
return hidden_state
class _snake_case (tf.keras.layers.Layer):
def __init__( self ,_snake_case ,_snake_case = 2 ,**_snake_case ):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = tf.keras.layers.Conv2D(
filters=_snake_case ,kernel_size=1 ,strides=_snake_case ,use_bias=_snake_case ,name="convolution" )
UpperCAmelCase_ : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = False ):
return self.normalization(self.convolution(_snake_case ) ,training=_snake_case )
class _snake_case (tf.keras.layers.Layer):
def __init__( self ,_snake_case ,_snake_case ,**_snake_case ):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_snake_case ,name="pooler" )
UpperCAmelCase_ : Tuple = [
tf.keras.layers.ConvaD(filters=_snake_case ,kernel_size=1 ,activation="relu" ,name="attention.0" ),
tf.keras.layers.ConvaD(filters=_snake_case ,kernel_size=1 ,activation="sigmoid" ,name="attention.2" ),
]
def UpperCamelCase__ ( self ,_snake_case ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
UpperCAmelCase_ : Optional[Any] = self.pooler(_snake_case )
for layer_module in self.attention:
UpperCAmelCase_ : Dict = layer_module(_snake_case )
UpperCAmelCase_ : Any = hidden_state * pooled
return hidden_state
class _snake_case (tf.keras.layers.Layer):
def __init__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = 1 ,**_snake_case ):
super().__init__(**_snake_case )
UpperCAmelCase_ : str = in_channels != out_channels or stride != 1
UpperCAmelCase_ : Optional[int] = max(1 ,out_channels // config.groups_width )
UpperCAmelCase_ : Tuple = (
TFRegNetShortCut(_snake_case ,stride=_snake_case ,name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" ,name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCAmelCase_ : List[str] = [
TFRegNetConvLayer(_snake_case ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ),
TFRegNetConvLayer(
_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ,name="layer.1" ),
TFRegNetConvLayer(_snake_case ,kernel_size=1 ,activation=_snake_case ,name="layer.2" ),
]
UpperCAmelCase_ : List[Any] = ACT2FN[config.hidden_act]
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Optional[Any] = hidden_state
for layer_module in self.layers:
UpperCAmelCase_ : str = layer_module(_snake_case )
UpperCAmelCase_ : List[str] = self.shortcut(_snake_case )
hidden_state += residual
UpperCAmelCase_ : Any = self.activation(_snake_case )
return hidden_state
class _snake_case (tf.keras.layers.Layer):
def __init__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = 1 ,**_snake_case ):
super().__init__(**_snake_case )
UpperCAmelCase_ : int = in_channels != out_channels or stride != 1
UpperCAmelCase_ : Optional[Any] = max(1 ,out_channels // config.groups_width )
UpperCAmelCase_ : Optional[Any] = (
TFRegNetShortCut(_snake_case ,stride=_snake_case ,name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" ,name="shortcut" )
)
UpperCAmelCase_ : Optional[Any] = [
TFRegNetConvLayer(_snake_case ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ),
TFRegNetConvLayer(
_snake_case ,stride=_snake_case ,groups=_snake_case ,activation=config.hidden_act ,name="layer.1" ),
TFRegNetSELayer(_snake_case ,reduced_channels=int(round(in_channels / 4 ) ) ,name="layer.2" ),
TFRegNetConvLayer(_snake_case ,kernel_size=1 ,activation=_snake_case ,name="layer.3" ),
]
UpperCAmelCase_ : List[str] = ACT2FN[config.hidden_act]
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[Any] = hidden_state
for layer_module in self.layers:
UpperCAmelCase_ : Union[str, Any] = layer_module(_snake_case )
UpperCAmelCase_ : Tuple = self.shortcut(_snake_case )
hidden_state += residual
UpperCAmelCase_ : Dict = self.activation(_snake_case )
return hidden_state
class _snake_case (tf.keras.layers.Layer):
def __init__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = 2 ,_snake_case = 2 ,**_snake_case ):
super().__init__(**_snake_case )
UpperCAmelCase_ : Dict = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
UpperCAmelCase_ : Dict = [
# downsampling is done in the first layer with stride of 2
layer(_snake_case ,_snake_case ,_snake_case ,stride=_snake_case ,name="layers.0" ),
*[layer(_snake_case ,_snake_case ,_snake_case ,name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCamelCase__ ( self ,_snake_case ):
for layer_module in self.layers:
UpperCAmelCase_ : int = layer_module(_snake_case )
return hidden_state
class _snake_case (tf.keras.layers.Layer):
def __init__( self ,_snake_case ,**_snake_case ):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[int] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_snake_case ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name="stages.0" ,) )
UpperCAmelCase_ : Union[str, Any] = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_snake_case ,config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_snake_case ,_snake_case ,_snake_case ,depth=_snake_case ,name=f'''stages.{i+1}''' ) )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = False ,_snake_case = True ):
UpperCAmelCase_ : Union[str, Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase_ : List[Any] = hidden_states + (hidden_state,)
UpperCAmelCase_ : int = stage_module(_snake_case )
if output_hidden_states:
UpperCAmelCase_ : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_snake_case ,hidden_states=_snake_case )
@keras_serializable
class _snake_case (tf.keras.layers.Layer):
__A : List[Any] =RegNetConfig
def __init__( self ,_snake_case ,**_snake_case ):
super().__init__(**_snake_case )
UpperCAmelCase_ : Any = config
UpperCAmelCase_ : int = TFRegNetEmbeddings(_snake_case ,name="embedder" )
UpperCAmelCase_ : List[str] = TFRegNetEncoder(_snake_case ,name="encoder" )
UpperCAmelCase_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_snake_case ,name="pooler" )
@unpack_inputs
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = False ,):
UpperCAmelCase_ : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : Optional[int] = self.embedder(_snake_case ,training=_snake_case )
UpperCAmelCase_ : List[Any] = self.encoder(
_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case ,training=_snake_case )
UpperCAmelCase_ : List[str] = encoder_outputs[0]
UpperCAmelCase_ : Optional[int] = self.pooler(_snake_case )
# Change to NCHW output format to have uniformity in the modules
UpperCAmelCase_ : Dict = tf.transpose(_snake_case ,perm=(0, 3, 1, 2) )
UpperCAmelCase_ : List[str] = tf.transpose(_snake_case ,perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCAmelCase_ : int = tuple([tf.transpose(_snake_case ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_snake_case ,pooler_output=_snake_case ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Union[str, Any] =RegNetConfig
__A : Optional[Any] ="regnet"
__A : List[Any] ="pixel_values"
@property
def UpperCamelCase__ ( self ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )}
_lowerCamelCase = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_lowerCamelCase = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , __SCREAMING_SNAKE_CASE , )
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,*_snake_case ,**_snake_case ):
super().__init__(_snake_case ,*_snake_case ,**_snake_case )
UpperCAmelCase_ : Optional[int] = TFRegNetMainLayer(_snake_case ,name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case=False ,):
UpperCAmelCase_ : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : Optional[Any] = self.regnet(
pixel_values=_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case ,training=_snake_case ,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __SCREAMING_SNAKE_CASE , )
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,*_snake_case ,**_snake_case ):
super().__init__(_snake_case ,*_snake_case ,**_snake_case )
UpperCAmelCase_ : Dict = config.num_labels
UpperCAmelCase_ : List[str] = TFRegNetMainLayer(_snake_case ,name="regnet" )
# classification head
UpperCAmelCase_ : Dict = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels ,name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_snake_case )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_snake_case ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCamelCase__ ( self ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case=False ,):
UpperCAmelCase_ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : Any = self.regnet(
_snake_case ,output_hidden_states=_snake_case ,return_dict=_snake_case ,training=_snake_case )
UpperCAmelCase_ : List[str] = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase_ : Union[str, Any] = self.classifier[0](_snake_case )
UpperCAmelCase_ : List[str] = self.classifier[1](_snake_case )
UpperCAmelCase_ : str = None if labels is None else self.hf_compute_loss(labels=_snake_case ,logits=_snake_case )
if not return_dict:
UpperCAmelCase_ : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states )
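# Minimal usage sketch for the classification model above (illustrative only; the
# checkpoint name is an assumption based on the public RegNet releases, not taken
# from this file):
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   predicted_class = int(tf.math.argmax(model(**inputs).logits, axis=-1))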
| 71 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(
_snake_case ,split=_snake_case ,features=_snake_case ,cache_dir=_snake_case ,keep_in_memory=_snake_case ,streaming=_snake_case ,num_proc=_snake_case ,**_snake_case ,)
UpperCAmelCase_ : Tuple = field
UpperCAmelCase_ : List[Any] = path_or_paths if isinstance(_snake_case ,_snake_case ) else {self.split: path_or_paths}
UpperCAmelCase_ : Optional[int] = Json(
cache_dir=_snake_case ,data_files=_snake_case ,features=_snake_case ,field=_snake_case ,**_snake_case ,)
def UpperCamelCase__ ( self ):
# Build iterable dataset
if self.streaming:
UpperCAmelCase_ : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
self.builder.download_and_prepare(
download_config=_snake_case ,download_mode=_snake_case ,verification_mode=_snake_case ,base_path=_snake_case ,num_proc=self.num_proc ,)
UpperCAmelCase_ : Dict = self.builder.as_dataset(
split=self.split ,verification_mode=_snake_case ,in_memory=self.keep_in_memory )
return dataset
class _snake_case :
def __init__( self ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Union[str, Any] = path_or_buf
UpperCAmelCase_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase_ : Dict = num_proc
UpperCAmelCase_ : Optional[Any] = "utf-8"
UpperCAmelCase_ : Optional[int] = to_json_kwargs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.to_json_kwargs.pop("path_or_buf" ,_snake_case )
UpperCAmelCase_ : Tuple = self.to_json_kwargs.pop("orient" ,"records" )
UpperCAmelCase_ : Any = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False )
UpperCAmelCase_ : Optional[int] = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True )
UpperCAmelCase_ : int = self.to_json_kwargs.pop("compression" ,_snake_case )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"wb" ,compression=_snake_case ) as buffer:
UpperCAmelCase_ : List[str] = self._write(file_obj=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
" was passed. Please provide a local path instead." )
UpperCAmelCase_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
return written
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = args
UpperCAmelCase_ : List[str] = query_table(
table=self.dataset.data ,key=slice(_snake_case ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
UpperCAmelCase_ : Optional[Any] = batch.to_pandas().to_json(
path_or_buf=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**_snake_case )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ,):
UpperCAmelCase_ : Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
UpperCAmelCase_ : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_snake_case )
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,_snake_case ,_snake_case )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
written += file_obj.write(_snake_case )
return written
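# Minimal usage sketch (illustrative only): `Dataset.to_json` is the public entry
# point that drives the writer above.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ds.to_json("out.jsonl")  # orient="records", one JSON object per line by default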
| 71 | 1 |
'''simple docstring'''
import re
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> str:
"""simple docstring"""
if len(re.findall("[ATCG]" , _SCREAMING_SNAKE_CASE ) ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError("Invalid Strand" )
return _SCREAMING_SNAKE_CASE.translate(str.maketrans("ATCG" , "TAGC" ) )
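# Intended behaviour (illustrative, not part of the original module): each base is
# swapped for its complement, e.g. "ATCG" -> "TAGC".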
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Any =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Dict =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
| 71 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = original_name.split("." )[0]
UpperCAmelCase_ : List[str] = key.split("." )
UpperCAmelCase_ : Any = int(key_list[key_list.index(_SCREAMING_SNAKE_CASE ) - 2] )
UpperCAmelCase_ : Dict = int(key_list[key_list.index(_SCREAMING_SNAKE_CASE ) - 1] )
UpperCAmelCase_ : Optional[int] = orig_block_num - offset
UpperCAmelCase_ : Tuple = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : str = OrderedDict()
UpperCAmelCase_ , UpperCAmelCase_ : Any = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
UpperCAmelCase_ : Dict = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
UpperCAmelCase_ : List[str] = key[: key.find("proj" )]
UpperCAmelCase_ : List[Any] = key.replace(_SCREAMING_SNAKE_CASE , F'''patch_embeddings.{total_embed_found}.''' )
UpperCAmelCase_ : List[str] = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
UpperCAmelCase_ : str = "poolformer.encoder." + key
if "mlp.fc1" in key:
UpperCAmelCase_ : Dict = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
UpperCAmelCase_ : int = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
UpperCAmelCase_ : Optional[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "norm1" , "before_norm" )
if "norm2" in key:
UpperCAmelCase_ : Optional[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "norm2" , "after_norm" )
if "layer_scale_1" in key:
UpperCAmelCase_ : List[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
UpperCAmelCase_ : List[Any] = replace_key_with_offset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
UpperCAmelCase_ : List[Any] = key.replace("head" , "classifier" )
UpperCAmelCase_ : Any = value
return new_state_dict
def a__ ( ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : Union[str, Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Dict = PoolFormerConfig()
# set attributes based on model_name
UpperCAmelCase_ : Any = "huggingface/label-files"
UpperCAmelCase_ : str = model_name[-3:]
UpperCAmelCase_ : Union[str, Any] = 10_00
UpperCAmelCase_ : Union[str, Any] = "imagenet-1k-id2label.json"
UpperCAmelCase_ : Tuple = (1, 10_00)
# set config attributes
UpperCAmelCase_ : int = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ : str = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Any = idalabel
UpperCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()}
if size == "s12":
UpperCAmelCase_ : Tuple = [2, 2, 6, 2]
UpperCAmelCase_ : int = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : int = 4.0
UpperCAmelCase_ : Optional[int] = 0.9
elif size == "s24":
UpperCAmelCase_ : str = [4, 4, 12, 4]
UpperCAmelCase_ : Union[str, Any] = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : List[Any] = 4.0
UpperCAmelCase_ : Optional[Any] = 0.9
elif size == "s36":
UpperCAmelCase_ : str = [6, 6, 18, 6]
UpperCAmelCase_ : str = [64, 1_28, 3_20, 5_12]
UpperCAmelCase_ : Union[str, Any] = 4.0
UpperCAmelCase_ : Any = 1E-6
UpperCAmelCase_ : Any = 0.9
elif size == "m36":
UpperCAmelCase_ : int = [6, 6, 18, 6]
UpperCAmelCase_ : Any = [96, 1_92, 3_84, 7_68]
UpperCAmelCase_ : Union[str, Any] = 4.0
UpperCAmelCase_ : int = 1E-6
UpperCAmelCase_ : Optional[Any] = 0.95
elif size == "m48":
UpperCAmelCase_ : str = [8, 8, 24, 8]
UpperCAmelCase_ : Optional[int] = [96, 1_92, 3_84, 7_68]
UpperCAmelCase_ : str = 4.0
UpperCAmelCase_ : List[str] = 1E-6
UpperCAmelCase_ : Optional[int] = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
UpperCAmelCase_ : Dict = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE )
# Prepare image
UpperCAmelCase_ : Optional[Any] = prepare_img()
UpperCAmelCase_ : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
UpperCAmelCase_ : Optional[int] = torch.load(_SCREAMING_SNAKE_CASE , map_location=torch.device("cpu" ) )
# rename keys
UpperCAmelCase_ : Tuple = rename_keys(_SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
UpperCAmelCase_ : Optional[int] = PoolFormerForImageClassification(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
UpperCAmelCase_ : List[str] = PoolFormerImageProcessor(crop_pct=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = outputs.logits
# define expected logit slices for different models
if size == "s12":
UpperCAmelCase_ : Any = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
UpperCAmelCase_ : str = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
UpperCAmelCase_ : Dict = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
UpperCAmelCase_ : Optional[Any] = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
UpperCAmelCase_ : Any = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_lowerCamelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
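# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer_s12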
| 71 |
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ) -> tuple[float, float]:
"""simple docstring"""
if not len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = equationa
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = equationa
# Calculate the determinants of the matrices
UpperCAmelCase_ : Optional[int] = aa * ba - aa * ba
UpperCAmelCase_ : Optional[int] = ca * ba - ca * ba
UpperCAmelCase_ : Any = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
UpperCAmelCase_ : Optional[int] = determinant_x / determinant
UpperCAmelCase_ : List[Any] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
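# Worked example of Cramer's rule as described in the comments above (illustrative
# only): for 2x + 3y = 6 and 4x + 9y = 15,
#   determinant   = 2*9  - 4*3  = 6
#   determinant_x = 6*9  - 15*3 = 9   -> x = 9/6 = 1.5
#   determinant_y = 2*15 - 4*6  = 6   -> y = 6/6 = 1.0
# so the solver should return (1.5, 1.0).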
| 71 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
# TODO Update this
_lowerCamelCase = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Any ="esm"
def __init__( self ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=7_68 ,_snake_case=12 ,_snake_case=12 ,_snake_case=30_72 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=10_26 ,_snake_case=0.02 ,_snake_case=1E-12 ,_snake_case="absolute" ,_snake_case=True ,_snake_case=None ,_snake_case=False ,_snake_case=False ,_snake_case=None ,_snake_case=None ,**_snake_case ,):
super().__init__(pad_token_id=_snake_case ,mask_token_id=_snake_case ,**_snake_case )
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : int = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Tuple = layer_norm_eps
UpperCAmelCase_ : Optional[int] = position_embedding_type
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : List[str] = emb_layer_norm_before
UpperCAmelCase_ : Dict = token_dropout
UpperCAmelCase_ : int = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
UpperCAmelCase_ : str = EsmFoldConfig()
elif isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : List[Any] = EsmFoldConfig(**_snake_case )
UpperCAmelCase_ : Tuple = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
UpperCAmelCase_ : Union[str, Any] = get_default_vocab_list()
else:
UpperCAmelCase_ : int = vocab_list
else:
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Union[str, Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"use_esm_attn_map" ,_snake_case ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = super().to_dict()
if isinstance(self.esmfold_config ,_snake_case ):
UpperCAmelCase_ : int = self.esmfold_config.to_dict()
return output
@dataclass
class _snake_case :
__A : str =None
__A : bool =True
__A : bool =False
__A : bool =False
__A : bool =False
__A : float =0
__A : bool =True
__A : bool =False
__A : int =1_28
__A : "TrunkConfig" =None
def UpperCamelCase__ ( self ):
if self.trunk is None:
UpperCAmelCase_ : int = TrunkConfig()
elif isinstance(self.trunk ,_snake_case ):
UpperCAmelCase_ : Optional[Any] = TrunkConfig(**self.trunk )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = asdict(self )
UpperCAmelCase_ : Tuple = self.trunk.to_dict()
return output
@dataclass
class _snake_case :
__A : int =48
__A : int =10_24
__A : int =1_28
__A : int =32
__A : int =32
__A : int =32
__A : float =0
__A : float =0
__A : bool =False
__A : int =4
__A : Optional[int] =1_28
__A : "StructureModuleConfig" =None
def UpperCamelCase__ ( self ):
if self.structure_module is None:
UpperCAmelCase_ : List[Any] = StructureModuleConfig()
elif isinstance(self.structure_module ,_snake_case ):
UpperCAmelCase_ : List[str] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
UpperCAmelCase_ : Tuple = self.sequence_state_dim // self.sequence_head_width
UpperCAmelCase_ : List[str] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = asdict(self )
UpperCAmelCase_ : List[Any] = self.structure_module.to_dict()
return output
@dataclass
class _snake_case :
__A : int =3_84
__A : int =1_28
__A : int =16
__A : int =1_28
__A : int =12
__A : int =4
__A : int =8
__A : float =0.1
__A : int =8
__A : int =1
__A : int =2
__A : int =7
__A : int =10
__A : float =1e-8
__A : float =1e5
def UpperCamelCase__ ( self ):
return asdict(self )
def a__ ( ) -> str:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
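# Sanity check of the head-width arithmetic above (illustrative, not part of the
# original module): with the TrunkConfig defaults sequence_state_dim=1024 and
# sequence_head_width=32, sequence_num_heads = 1024 // 32 = 32 and 32 * 32 == 1024,
# so the validation in __post_init__ passes.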
| 71 |
'''simple docstring'''
from statistics import mean, stdev
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Dict = min(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = max(_SCREAMING_SNAKE_CASE )
# normalize data
return [round((x - x_min) / (x_max - x_min) , _SCREAMING_SNAKE_CASE ) for x in data]
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Tuple = mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = stdev(_SCREAMING_SNAKE_CASE )
# standardize data
return [round((x - mu) / (sigma) , _SCREAMING_SNAKE_CASE ) for x in data]
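# Intended behaviour of the two helpers above (illustrative only):
#   min-max normalization of [2.0, 4.0, 6.0]   -> [0.0, 0.5, 1.0]
#   z-score standardization of [2.0, 4.0, 6.0] -> [-1.0, 0.0, 1.0]
# (mean 4.0 and sample standard deviation 2.0 in the second case).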
| 71 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
def a__ ( _SCREAMING_SNAKE_CASE : Callable[[int | float], int | float] , _SCREAMING_SNAKE_CASE : int | float , _SCREAMING_SNAKE_CASE : int | float , _SCREAMING_SNAKE_CASE : int = 1_00 , ) -> float:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = x_start
UpperCAmelCase_ : Optional[Any] = fnc(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = 0.0
for _ in range(_SCREAMING_SNAKE_CASE ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
UpperCAmelCase_ : int = (x_end - x_start) / steps + xa
UpperCAmelCase_ : str = fnc(_SCREAMING_SNAKE_CASE )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCAmelCase_ : Any = xa
UpperCAmelCase_ : List[Any] = fxa
return area
if __name__ == "__main__":
def a__ ( _SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
return x**3 + x**2
print("""f(x) = x^3 + x^2""")
print("""The area between the curve, x = -5, x = 5 and the x axis is:""")
_lowerCamelCase = 10
while i <= 10_0000:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
| 71 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
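# Condensed sketch of the pattern this script demonstrates (illustrative only;
# the full training loop below is the authoritative version):
#
#   from accelerate.utils import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_loop(batch_size):
#       ...  # build dataloaders with `batch_size` and train
#
#   training_loop()  # on CUDA OOM, the decorator halves batch_size and retries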
_lowerCamelCase = 16
_lowerCamelCase = 32
def a__ ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase_ : Tuple = load_dataset("glue" , "mrpc" )
def tokenize_function(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ : Union[str, Any] = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_SCREAMING_SNAKE_CASE : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ : int = 8
else:
UpperCAmelCase_ : Optional[Any] = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE , padding="longest" , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase_ : Any = DataLoader(
tokenized_datasets["train"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets["validation"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase = mocked_dataloaders # noqa: F811
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _SCREAMING_SNAKE_CASE ) == "1":
UpperCAmelCase_ : Tuple = 2
# Initialize accelerator
UpperCAmelCase_ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : str = config["lr"]
UpperCAmelCase_ : Union[str, Any] = int(config["num_epochs"] )
UpperCAmelCase_ : Tuple = int(config["seed"] )
UpperCAmelCase_ : Union[str, Any] = int(config["batch_size"] )
UpperCAmelCase_ : List[str] = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_SCREAMING_SNAKE_CASE )
def inner_training_loop(_SCREAMING_SNAKE_CASE : List[str] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ : Dict = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ : int = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate scheduler
UpperCAmelCase_ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase_ : str = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.loss
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _SCREAMING_SNAKE_CASE )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCAmelCase_ : Tuple = parser.parse_args()
UpperCAmelCase_ : int = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 71 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCamelCase = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 71 |
'''simple docstring'''
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i )
if n > 1:
factors.append(n )
return factors
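# Intended behaviour (illustrative): 100 -> [2, 2, 5, 5], since 2 * 2 * 5 * 5 == 100.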
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 1 |
'''simple docstring'''
from __future__ import annotations
_lowerCamelCase = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_lowerCamelCase = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def a__ ( _SCREAMING_SNAKE_CASE : list[float] ) -> list[float]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : float = -1
for j in range(i + 1 , _SCREAMING_SNAKE_CASE ):
if arr[i] < arr[j]:
UpperCAmelCase_ : List[Any] = arr[j]
break
result.append(_SCREAMING_SNAKE_CASE )
return result
def a__ ( _SCREAMING_SNAKE_CASE : list[float] ) -> list[float]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = []
for i, outer in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
UpperCAmelCase_ : Optional[int] = inner
break
result.append(_SCREAMING_SNAKE_CASE )
return result
def a__ ( _SCREAMING_SNAKE_CASE : list[float] ) -> list[float]:
"""simple docstring"""
UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : list[float] = []
UpperCAmelCase_ : list[float] = [-1] * arr_size
for index in reversed(range(_SCREAMING_SNAKE_CASE ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
UpperCAmelCase_ : Tuple = stack[-1]
stack.append(arr[index] )
return result
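# Illustrative trace (not part of the original file): for [2, 7, 3, 5, 1] the next
# greater elements to the right are [7, -1, 5, -1, -1].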
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_lowerCamelCase = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 71 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _snake_case (datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ) ,id="references" ),
} ) ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = 1 ,_snake_case = 4 ,):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_snake_case ,hypotheses=_snake_case ,min_len=_snake_case ,max_len=_snake_case )
}
| 71 | 1 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 71 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCamelCase = logging.getLogger(__name__)
@dataclass
class _snake_case :
__A : str =field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether tp freeze the encoder."})
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class _snake_case :
__A : str =field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
__A : Optional[str] =field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__A : Optional[int] =field(
default=10_24 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(
default=1_28 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__A : Optional[int] =field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(default=-1 , metadata={"help": "# training examples. -1 means use all."})
__A : Optional[int] =field(default=-1 , metadata={"help": "# validation examples. -1 means use all."})
__A : Optional[int] =field(default=-1 , metadata={"help": "# test examples. -1 means use all."})
__A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Source language id for translation."})
__A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Target language id for translation."})
__A : Optional[int] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "# num_beams to use for evaluation."})
__A : bool =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}_results.json''' ) )
def a__ ( ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses()
check_output_dir(_SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase_ : List[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_SCREAMING_SNAKE_CASE , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCAmelCase_ : Dict = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_SCREAMING_SNAKE_CASE )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCAmelCase_ : Dict = SeqaSeqDataset
# Get datasets
UpperCAmelCase_ : Tuple = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
UpperCAmelCase_ : Dict = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCAmelCase_ : int = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCAmelCase_ : Optional[Any] = (
build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None
)
UpperCAmelCase_ : List[str] = SeqaSeqTrainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : List[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
UpperCAmelCase_ : Any = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCAmelCase_ : int = train_result.metrics
UpperCAmelCase_ : Dict = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(metric_key_prefix="val" )
UpperCAmelCase_ : Optional[Any] = data_args.n_val
UpperCAmelCase_ : Union[str, Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.do_predict:
logger.info("*** Predict ***" )
UpperCAmelCase_ : List[Any] = trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="test" )
UpperCAmelCase_ : List[str] = test_output.metrics
UpperCAmelCase_ : int = data_args.n_test
if trainer.is_world_process_zero():
UpperCAmelCase_ : Optional[Any] = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.predict_with_generate:
UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = lmap(str.strip , _SCREAMING_SNAKE_CASE )
write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
    """simple docstring"""
    # Entry point used by multi-process launchers such as xla_spawn (TPUs):
    # the process index argument is received but unused; each worker runs main().
    main()
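# `handle_metrics` (and the `save_json` it relies on) are called in main() but not
# defined in this excerpt; a minimal sketch of the metrics helper, assuming it logs
# each metric and persists the dict as `<split>_results.json` (the original utility
# may differ):
def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))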
if __name__ == "__main__":
main()
| 71 | 1 |
'''simple docstring'''
def a__ ( input_str : str , use_pascal : bool = False ) -> str:
    """simple docstring"""
    if not isinstance(input_str , str ):
        msg = F'''Expected string as input, found {type(input_str )}'''
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = F'''Expected boolean as use_pascal parameter, found {type(use_pascal )}'''
        raise ValueError(msg )
    words = input_str.split("_" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
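# Usage sketch for the converter above (outputs assume the fixed implementation):
if __name__ == "__main__":
    print(a__("some_random_string"))                   # -> someRandomString
    print(a__("some_random_string", use_pascal=True))  # -> SomeRandomString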
| 71 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _snake_case :
__A : Dict =BlenderbotConfig
__A : Union[str, Any] ={}
__A : Any ="gelu"
def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,):
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : str = eos_token_id
UpperCAmelCase_ : List[Any] = pad_token_id
UpperCAmelCase_ : List[Any] = bos_token_id
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case )
return config, inputs_dict
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder()
UpperCAmelCase_ : int = inputs_dict["input_ids"]
UpperCAmelCase_ : Dict = input_ids[:1, :]
UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_ : int = inputs_dict["head_mask"]
UpperCAmelCase_ : Optional[int] = 1
# first forward pass
UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0]
UpperCAmelCase_ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 )
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , ) -> Union[str, Any]:
"""simple docstring"""
if attention_mask is None:
UpperCAmelCase_ : Dict = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase_ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__A : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__A : Dict =(
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A : Any =True
__A : Dict =False
__A : Dict =False
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = TFBlenderbotModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=_snake_case )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
@require_tokenizers
@require_tf
class _snake_case (unittest.TestCase):
__A : Optional[int] =["My friends are cool but they eat too many carbs."]
__A : Optional[Any] ="facebook/blenderbot-400M-distill"
@cached_property
def UpperCamelCase__ ( self ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.tokenizer(self.src_text ,return_tensors="tf" )
UpperCAmelCase_ : Union[str, Any] = self.model.generate(
model_inputs.input_ids ,)
UpperCAmelCase_ : str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_snake_case )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
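# Note: `prepare_config_and_inputs_for_common` is called in the common-test class
# above but not shown in this excerpt; in the usual model-tester pattern it is a
# thin wrapper (a sketch, not the original method):
#
#     def prepare_config_and_inputs_for_common(self):
#         config, inputs_dict = self.prepare_config_and_inputs()
#         return config, inputs_dict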
| 71 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : str ="xmod"
def __init__( self ,_snake_case=3_05_22 ,_snake_case=7_68 ,_snake_case=12 ,_snake_case=12 ,_snake_case=30_72 ,_snake_case="gelu" ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=5_12 ,_snake_case=2 ,_snake_case=0.02 ,_snake_case=1E-12 ,_snake_case=1 ,_snake_case=0 ,_snake_case=2 ,_snake_case="absolute" ,_snake_case=True ,_snake_case=None ,_snake_case=False ,_snake_case=2 ,_snake_case=False ,_snake_case=True ,_snake_case=True ,_snake_case=("en_XX",) ,_snake_case=None ,**_snake_case ,):
super().__init__(pad_token_id=_snake_case ,bos_token_id=_snake_case ,eos_token_id=_snake_case ,**_snake_case )
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : int = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = type_vocab_size
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : Any = position_embedding_type
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : Union[str, Any] = classifier_dropout
UpperCAmelCase_ : Optional[Any] = pre_norm
UpperCAmelCase_ : int = adapter_reduction_factor
UpperCAmelCase_ : Dict = adapter_layer_norm
UpperCAmelCase_ : str = adapter_reuse_layer_norm
UpperCAmelCase_ : Dict = ln_before_adapter
UpperCAmelCase_ : List[Any] = list(_snake_case )
UpperCAmelCase_ : Optional[Any] = default_language
class _snake_case (__SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
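# For reference, the ONNX `inputs` property above resolves to the following
# mapping for non-multiple-choice tasks (read directly from the property body):
#
#     OrderedDict([
#         ("input_ids", {0: "batch", 1: "sequence"}),
#         ("attention_mask", {0: "batch", 1: "sequence"}),
#     ])
#
# with a third axis ({0: "batch", 1: "choice", 2: "sequence"}) swapped in for
# multiple-choice tasks.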
| 71 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def a__ ( x : float , mu : float = 0.0 , sigma : float = 1.0 ) -> float:
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
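# Worked example for the density above: at x == mu the exponent vanishes, so the
# value is 1 / sqrt(2 * pi * sigma**2); for the standard normal (mu=0, sigma=1)
# this gives a__(0.0) == 1 / sqrt(2 * pi) ≈ 0.3989422804014327.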
| 71 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _snake_case (unittest.TestCase):
def __init__( self ,_snake_case ,_snake_case=7 ,_snake_case=3 ,_snake_case=18 ,_snake_case=30 ,_snake_case=4_00 ,_snake_case=True ,_snake_case=None ,_snake_case=True ,):
UpperCAmelCase_ : Dict = size if size is not None else {"height": 18, "width": 18}
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : List[Any] = image_size
UpperCAmelCase_ : Optional[int] = min_resolution
UpperCAmelCase_ : Any = max_resolution
UpperCAmelCase_ : Union[str, Any] = do_resize
UpperCAmelCase_ : List[Any] = size
UpperCAmelCase_ : Dict = do_normalize
def UpperCamelCase__ ( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Dict =ImageGPTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = ImageGPTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case ,"clusters" ) )
self.assertTrue(hasattr(_snake_case ,"do_resize" ) )
self.assertTrue(hasattr(_snake_case ,"size" ) )
self.assertTrue(hasattr(_snake_case ,"do_normalize" ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"height": 18, "width": 18} )
UpperCAmelCase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"height": 42, "width": 42} )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase_ : List[str] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case ,obj[key] ) )
else:
self.assertEqual(obj[key] ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Any = os.path.join(_snake_case ,"image_processor.json" )
image_processor_first.to_json_file(_snake_case )
UpperCAmelCase_ : List[str] = self.image_processing_class.from_json_file(_snake_case ).to_dict()
UpperCAmelCase_ : List[str] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_snake_case )
UpperCAmelCase_ : Dict = self.image_processing_class.from_pretrained(_snake_case ).to_dict()
UpperCAmelCase_ : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,_snake_case )
@unittest.skip("ImageGPT requires clusters at initialization" )
def UpperCamelCase__ ( self ):
pass
def a__ ( ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
UpperCAmelCase_ : Tuple = Image.open(dataset[4]["file"] )
UpperCAmelCase_ : Union[str, Any] = Image.open(dataset[5]["file"] )
UpperCAmelCase_ : Any = [imagea, imagea]
return images
@require_vision
@require_torch
class _snake_case (unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
UpperCAmelCase_ : Dict = prepare_images()
# test non-batched
UpperCAmelCase_ : Optional[int] = image_processing(images[0] ,return_tensors="pt" )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(1, 10_24) )
UpperCAmelCase_ : List[str] = [3_06, 1_91, 1_91]
self.assertEqual(encoding.input_ids[0, :3].tolist() ,_snake_case )
# test batched
UpperCAmelCase_ : Tuple = image_processing(_snake_case ,return_tensors="pt" )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(2, 10_24) )
UpperCAmelCase_ : int = [3_03, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() ,_snake_case )
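# The integration test above depends on ImageGPT's color-cluster quantization:
# every normalized pixel is replaced by the id of its nearest cluster centroid,
# which is how an image becomes the (1, 1024)-shaped token sequence asserted
# above. A minimal numpy sketch of that step (an illustration mirroring the
# processor's behaviour, not its internal API):
import numpy as np

def nearest_cluster_ids(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) floats (ImageGPT normalizes to [-1, 1]); clusters: (k, 3)
    # centroids. Returns the index of the closest centroid for each pixel.
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)  # (n, k)
    return distances.argmin(axis=1)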
| 71 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _snake_case (nn.Module):
def __init__( self ,_snake_case = 16 ,_snake_case = 88 ,_snake_case = None ,_snake_case = 1 ,_snake_case = 0.0 ,_snake_case = 32 ,_snake_case = None ,_snake_case = False ,_snake_case = None ,_snake_case = None ,_snake_case = "geglu" ,_snake_case = None ,):
super().__init__()
UpperCAmelCase_ : Optional[Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_snake_case ,attention_head_dim=_snake_case ,in_channels=_snake_case ,num_layers=_snake_case ,dropout=_snake_case ,norm_num_groups=_snake_case ,cross_attention_dim=_snake_case ,attention_bias=_snake_case ,sample_size=_snake_case ,num_vector_embeds=_snake_case ,activation_fn=_snake_case ,num_embeds_ada_norm=_snake_case ,)
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
UpperCAmelCase_ : List[str] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
UpperCAmelCase_ : int = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
UpperCAmelCase_ : List[Any] = [1, 0]
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case = True ,):
UpperCAmelCase_ : List[str] = hidden_states
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Optional[int] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
UpperCAmelCase_ : Any = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
UpperCAmelCase_ : Any = self.transformer_index_for_condition[i]
UpperCAmelCase_ : int = self.transformers[transformer_index](
_snake_case ,encoder_hidden_states=_snake_case ,timestep=_snake_case ,cross_attention_kwargs=_snake_case ,return_dict=_snake_case ,)[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
UpperCAmelCase_ : Dict = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
UpperCAmelCase_ : List[Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_snake_case )
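# The forward pass above blends the two branches linearly: with residuals
# r_i = transformer_i(input) - input, the result is
#     output = mix_ratio * r_0 + (1 - mix_ratio) * r_1 + input
# A tiny torch sketch of just that combination step (illustrative only):
import torch

def mix_branches(residual_a: torch.Tensor, residual_b: torch.Tensor, inp: torch.Tensor, mix_ratio: float = 0.5) -> torch.Tensor:
    # residual_a / residual_b correspond to `encoded_state - input_states` above.
    return residual_a * mix_ratio + residual_b * (1 - mix_ratio) + inp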
| 71 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowerCamelCase = sys.version_info >= (3, 10)
def a__ ( _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Any=None ) -> int:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE )
@dataclass
class _snake_case :
__A : int
__A : float
__A : str
__A : bool
@dataclass
class _snake_case :
__A : int =42
__A : str =field(default="toto" , metadata={"help": "help message"})
@dataclass
class _snake_case :
__A : bool =False
__A : bool =True
__A : Optional[bool] =None
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Optional[int] ="titi"
__A : str ="toto"
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : List[Any] ="titi"
__A : List[Any] ="toto"
__A : Optional[Any] =42
@dataclass
class _snake_case :
__A : BasicEnum ="toto"
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = BasicEnum(self.foo )
@dataclass
class _snake_case :
__A : MixedTypeEnum ="toto"
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = MixedTypeEnum(self.foo )
@dataclass
class _snake_case :
__A : Optional[int] =None
__A : Optional[float] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "help message"})
__A : Optional[str] =None
__A : Optional[List[str]] =list_field(default=[])
__A : Optional[List[int]] =list_field(default=[])
@dataclass
class _snake_case :
__A : List[int] =list_field(default=[])
__A : List[int] =list_field(default=[1, 2, 3])
__A : List[str] =list_field(default=["Hallo", "Bonjour", "Hello"])
__A : List[float] =list_field(default=[0.1, 0.2, 0.3])
@dataclass
class _snake_case :
__A : List[int] =field()
__A : str =field()
__A : BasicEnum =field()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = BasicEnum(self.required_enum )
@dataclass
class _snake_case :
__A : int
__A : "BasicEnum" =field()
__A : "Optional[bool]" =None
__A : "str" =field(default="toto" , metadata={"help": "help message"})
__A : "List[str]" =list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:
@dataclass
class _snake_case :
__A : bool =False
__A : bool =True
__A : bool | None =None
@dataclass
class _snake_case :
__A : int | None =None
__A : float | None =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "help message"})
__A : str | None =None
__A : list[str] | None =list_field(default=[])
__A : list[int] | None =list_field(default=[])
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
self.assertEqual(len(a._actions ) ,len(b._actions ) )
for x, y in zip(a._actions ,b._actions ):
UpperCAmelCase_ : List[str] = {k: v for k, v in vars(_snake_case ).items() if k != "container"}
UpperCAmelCase_ : List[Any] = {k: v for k, v in vars(_snake_case ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" ,_snake_case ) and yy.get("choices" ,_snake_case ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](_snake_case ) ,yy["type"](_snake_case ) )
del xx["type"], yy["type"]
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
expected.add_argument("--foo" ,type=_snake_case ,required=_snake_case )
expected.add_argument("--bar" ,type=_snake_case ,required=_snake_case )
expected.add_argument("--baz" ,type=_snake_case ,required=_snake_case )
expected.add_argument("--flag" ,type=_snake_case ,default=_snake_case ,const=_snake_case ,nargs="?" )
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((UpperCAmelCase_) , ) : int = parser.parse_args_into_dataclasses(_snake_case ,look_for_args_file=_snake_case )
self.assertFalse(example.flag )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument("--foo" ,default=42 ,type=_snake_case )
expected.add_argument("--baz" ,default="toto" ,type=_snake_case ,help="help message" )
self.argparsersEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
expected.add_argument("--foo" ,type=_snake_case ,default=_snake_case ,const=_snake_case ,nargs="?" )
expected.add_argument("--baz" ,type=_snake_case ,default=_snake_case ,const=_snake_case ,nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" ,action="store_false" ,default=_snake_case ,dest="baz" )
expected.add_argument("--opt" ,type=_snake_case ,default=_snake_case )
UpperCAmelCase_ : Tuple = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_snake_case )
for dataclass_type in dataclass_types:
UpperCAmelCase_ : Union[str, Any] = HfArgumentParser(_snake_case )
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,baz=_snake_case ,opt=_snake_case ) )
UpperCAmelCase_ : List[str] = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,baz=_snake_case ,opt=_snake_case ) )
UpperCAmelCase_ : Any = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,baz=_snake_case ,opt=_snake_case ) )
UpperCAmelCase_ : List[str] = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,baz=_snake_case ,opt=_snake_case ) )
UpperCAmelCase_ : int = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,baz=_snake_case ,opt=_snake_case ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Any = argparse.ArgumentParser()
expected.add_argument(
"--foo" ,default="toto" ,choices=["titi", "toto", 42] ,type=make_choice_type_function(["titi", "toto", 42] ) ,)
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = parser.parse_args([] )
self.assertEqual(args.foo ,"toto" )
UpperCAmelCase_ : Tuple = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.toto )
UpperCAmelCase_ : Dict = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo ,"titi" )
UpperCAmelCase_ : Tuple = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.titi )
UpperCAmelCase_ : int = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo ,42 )
UpperCAmelCase_ : Any = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo ,MixedTypeEnum.fourtytwo )
def UpperCamelCase__ ( self ):
@dataclass
class _snake_case :
__A : Literal["titi", "toto", 42] ="toto"
UpperCAmelCase_ : List[Any] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument(
"--foo" ,default="toto" ,choices=("titi", "toto", 42) ,type=make_choice_type_function(["titi", "toto", 42] ) ,)
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = parser.parse_args([] )
self.assertEqual(args.foo ,"toto" )
UpperCAmelCase_ : int = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo ,"titi" )
UpperCAmelCase_ : int = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo ,42 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("--foo_int" ,nargs="+" ,default=[] ,type=_snake_case )
expected.add_argument("--bar_int" ,nargs="+" ,default=[1, 2, 3] ,type=_snake_case )
expected.add_argument("--foo_str" ,nargs="+" ,default=["Hallo", "Bonjour", "Hello"] ,type=_snake_case )
expected.add_argument("--foo_float" ,nargs="+" ,default=[0.1, 0.2, 0.3] ,type=_snake_case )
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = parser.parse_args([] )
self.assertEqual(
_snake_case ,Namespace(foo_int=[] ,bar_int=[1, 2, 3] ,foo_str=["Hallo", "Bonjour", "Hello"] ,foo_float=[0.1, 0.2, 0.3] ) ,)
UpperCAmelCase_ : Tuple = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(_snake_case ,Namespace(foo_int=[1] ,bar_int=[2, 3] ,foo_str=["a", "b", "c"] ,foo_float=[0.1, 0.7] ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = argparse.ArgumentParser()
expected.add_argument("--foo" ,default=_snake_case ,type=_snake_case )
expected.add_argument("--bar" ,default=_snake_case ,type=_snake_case ,help="help message" )
expected.add_argument("--baz" ,default=_snake_case ,type=_snake_case )
expected.add_argument("--ces" ,nargs="+" ,default=[] ,type=_snake_case )
expected.add_argument("--des" ,nargs="+" ,default=[] ,type=_snake_case )
UpperCAmelCase_ : Dict = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(_snake_case )
for dataclass_type in dataclass_types:
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_snake_case )
self.argparsersEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(_snake_case ,Namespace(foo=_snake_case ,bar=_snake_case ,baz=_snake_case ,ces=[] ,des=[] ) )
UpperCAmelCase_ : List[str] = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(_snake_case ,Namespace(foo=12 ,bar=3.14 ,baz="42" ,ces=["a", "b", "c"] ,des=[1, 2, 3] ) )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = HfArgumentParser(_snake_case )
UpperCAmelCase_ : List[Any] = argparse.ArgumentParser()
expected.add_argument("--required_list" ,nargs="+" ,type=_snake_case ,required=_snake_case )
expected.add_argument("--required_str" ,type=_snake_case ,required=_snake_case )
expected.add_argument(
"--required_enum" ,type=make_choice_type_function(["titi", "toto"] ) ,choices=["titi", "toto"] ,required=_snake_case ,)
self.argparsersEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
expected.add_argument("--foo" ,type=_snake_case ,required=_snake_case )
expected.add_argument(
"--required_enum" ,type=make_choice_type_function(["titi", "toto"] ) ,choices=["titi", "toto"] ,required=_snake_case ,)
expected.add_argument("--opt" ,type=_snake_case ,default=_snake_case )
expected.add_argument("--baz" ,default="toto" ,type=_snake_case ,help="help message" )
expected.add_argument("--foo_str" ,nargs="+" ,default=["Hallo", "Bonjour", "Hello"] ,type=_snake_case )
self.argparsersEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : List[str] = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
UpperCAmelCase_ : Any = parser.parse_dict(_snake_case )[0]
UpperCAmelCase_ : Dict = BasicExample(**_snake_case )
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = HfArgumentParser(_snake_case )
UpperCAmelCase_ : str = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(_snake_case ,parser.parse_dict ,_snake_case ,allow_extra_keys=_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = HfArgumentParser(_snake_case )
UpperCAmelCase_ : Any = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[Any] = os.path.join(_snake_case ,"temp_json" )
os.mkdir(_snake_case )
with open(temp_local_path + ".json" ,"w+" ) as f:
json.dump(_snake_case ,_snake_case )
            UpperCAmelCase_ : str = parser.parse_json_file(Path(temp_local_path + ".json" ) )[0]
UpperCAmelCase_ : Union[str, Any] = BasicExample(**_snake_case )
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = HfArgumentParser(_snake_case )
UpperCAmelCase_ : int = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : Dict = os.path.join(_snake_case ,"temp_yaml" )
os.mkdir(_snake_case )
with open(temp_local_path + ".yaml" ,"w+" ) as f:
yaml.dump(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
UpperCAmelCase_ : str = BasicExample(**_snake_case )
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = HfArgumentParser(_snake_case )
self.assertIsNotNone(_snake_case )
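# A minimal usage sketch of the parser pattern exercised by the tests above
# (the dataclass and flag values here are illustrative, not taken from the suite):
def _usage_sketch():
    from dataclasses import dataclass

    @dataclass
    class SketchArgs:
        foo: int = 42
        baz: str = "toto"

    parser = HfArgumentParser(SketchArgs)
    (args,) = parser.parse_args_into_dataclasses(["--foo", "7"])
    assert args.foo == 7 and args.baz == "toto"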
| 71 |
'''simple docstring'''
import json
import sys
def format_json_to_md ( input_json_file : str , output_md_file : str ) -> None:
    """simple docstring"""
    with open(input_json_file , encoding="utf-8" ) as f:
        results = json.load(f )
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/" )[-1]
        output_md.append(F'''### Benchmark: {benchmark_file_name}''' )
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old" , None )
            dif_val = metric_vals.get("diff" , None )
            val_str = F''' {new_val:f}''' if isinstance(new_val , (int, float) ) else "None"
            if old_val is not None:
                val_str += F''' / {old_val:f}''' if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += F''' ({dif_val:f})''' if isinstance(dif_val , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>" )
    with open(output_md_file , "w" , encoding="utf-8" ) as f:
        f.writelines("\n".join(output_md ) )
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
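# Worked example for the formatter above: an input file containing
#     {"benchmarks/b0": {"time": {"new": 1.0, "old": 2.0, "diff": -1.0}}}
# produces a collapsible "<details>" block with a "### Benchmark: b0" heading
# and the table row "| new / old (diff) | 1.000000 / 2.000000 (-1.000000) |".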
| 71 | 1 |
'''simple docstring'''
from math import sqrt
def a__ ( number : int ) -> bool:
    """simple docstring"""
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are not primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> List[str]:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
UpperCAmelCase_ : Tuple = list(range(2 , n + 1 ) )
UpperCAmelCase_ : Dict = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
for j in range(i + 1 , len(_SCREAMING_SNAKE_CASE ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
UpperCAmelCase_ : List[Any] = 0
# filters actual prime numbers.
UpperCAmelCase_ : Any = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n > 2), "'N' must been an int and > 2"
UpperCAmelCase_ : Union[str, Any] = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_SCREAMING_SNAKE_CASE ):
ans.append(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
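# Intended behaviour of the collector above: with n = 10 it gathers every prime
# in [2, 10], i.e. [2, 3, 5, 7].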
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and number >= 0, "'number' must been an int and >= 0"
UpperCAmelCase_ : Optional[Any] = [] # this list will be returns of the function.
# potential prime number factors.
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Optional[Any] = number
if number == 0 or number == 1:
ans.append(_SCREAMING_SNAKE_CASE )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(_SCREAMING_SNAKE_CASE ):
while quotient != 1:
if is_prime(_SCREAMING_SNAKE_CASE ) and (quotient % factor == 0):
ans.append(_SCREAMING_SNAKE_CASE )
quotient /= factor
else:
factor += 1
else:
ans.append(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type list"
return ans
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' bust been an int and >= 0"
UpperCAmelCase_ : Tuple = 0
# prime factorization of 'number'
UpperCAmelCase_ : str = prime_factorization(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = max(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
number >= 0
), "'number' bust been an int and >= 0"
UpperCAmelCase_ : List[str] = 0
# prime factorization of 'number'
UpperCAmelCase_ : Any = prime_factorization(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = min(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "'ans' must been from type int"
return ans
def a__ ( number : int ) -> bool:
    """simple docstring"""
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare must been from type bool"
    return number % 2 == 0
def a__ ( number : int ) -> bool:
    """simple docstring"""
    assert isinstance(number , int ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool ), "compare must been from type bool"
    return number % 2 != 0
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (number > 2) and is_even(_SCREAMING_SNAKE_CASE )
), "'number' must been an int, even and > 2"
UpperCAmelCase_ : Optional[Any] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
UpperCAmelCase_ : Any = get_prime_numbers(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = len(_SCREAMING_SNAKE_CASE )
# run variable for while-loops.
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Optional[Any] = None
# exit variable. for break up the loops
UpperCAmelCase_ : Tuple = True
while i < len_pn and loop:
UpperCAmelCase_ : Dict = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
UpperCAmelCase_ : str = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (len(_SCREAMING_SNAKE_CASE ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
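# Worked example for the Goldbach split above: for 28 the prime pairs are
# (5, 23) and (11, 17); scanning i < j returns the first match, [5, 23].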
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
"""simple docstring"""
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
UpperCAmelCase_ : Any = 0
while numbera != 0:
UpperCAmelCase_ : Dict = numbera % numbera
UpperCAmelCase_ : str = numbera
UpperCAmelCase_ : Optional[int] = rest
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
UpperCAmelCase_ : str = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
UpperCAmelCase_ : Dict = prime_factorization(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = prime_factorization(_SCREAMING_SNAKE_CASE )
elif numbera == 1 or numbera == 1:
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : str = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : str = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
UpperCAmelCase_ : Optional[int] = prime_fac_a.count(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = prime_fac_a.count(_SCREAMING_SNAKE_CASE )
for _ in range(max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
ans *= n
else:
UpperCAmelCase_ : Any = prime_fac_a.count(_SCREAMING_SNAKE_CASE )
for _ in range(_SCREAMING_SNAKE_CASE ):
ans *= n
done.append(_SCREAMING_SNAKE_CASE )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
UpperCAmelCase_ : Dict = prime_fac_a.count(_SCREAMING_SNAKE_CASE )
for _ in range(_SCREAMING_SNAKE_CASE ):
ans *= n
done.append(_SCREAMING_SNAKE_CASE )
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def a__ ( _SCREAMING_SNAKE_CASE : Dict ) -> int:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'number' must been a positive int"
UpperCAmelCase_ : Tuple = 0
UpperCAmelCase_ : Union[str, Any] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_SCREAMING_SNAKE_CASE ):
ans += 1
# precondition
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and is_prime(
_SCREAMING_SNAKE_CASE ), "'ans' must been a prime number and from type int"
return ans
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
assert (
is_prime(_SCREAMING_SNAKE_CASE ) and is_prime(_SCREAMING_SNAKE_CASE ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
UpperCAmelCase_ : str = p_number_a + 1 # jump to the next number
UpperCAmelCase_ : Any = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_SCREAMING_SNAKE_CASE ):
number += 1
while number < p_number_a:
ans.append(_SCREAMING_SNAKE_CASE )
number += 1
# fetch the next prime number.
while not is_prime(_SCREAMING_SNAKE_CASE ):
number += 1
# precondition
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and ans[0] != p_number_a
and ans[len(_SCREAMING_SNAKE_CASE ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 1), "'n' must been int and >= 1"
UpperCAmelCase_ : Tuple = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_SCREAMING_SNAKE_CASE )
# precondition
assert ans[0] == 1 and ans[len(_SCREAMING_SNAKE_CASE ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
number > 1
), "'number' must been an int and >= 1"
UpperCAmelCase_ : List[Any] = get_divisors(_SCREAMING_SNAKE_CASE )
# precondition
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (divisors[0] == 1)
and (divisors[len(_SCREAMING_SNAKE_CASE ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
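# Worked example for the perfect-number check above: 6 has divisors
# [1, 2, 3, 6]; the proper divisors sum to 1 + 2 + 3 == 6, so 6 is perfect,
# while 8 gives 1 + 2 + 4 == 7 and is not.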
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
UpperCAmelCase_ : int = gcd(abs(_SCREAMING_SNAKE_CASE ) , abs(_SCREAMING_SNAKE_CASE ) )
# precondition
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> Dict:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been a int and >= 0"
UpperCAmelCase_ : str = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def a__ ( _SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (n >= 0), "'n' must been an int and >= 0"
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Any = 1
UpperCAmelCase_ : Any = 1 # this will be return
for _ in range(n - 1 ):
UpperCAmelCase_ : List[str] = ans
ans += fiba
UpperCAmelCase_ : Dict = tmp
return ans
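# Usage notes for the number-theory helpers above (they all share the obfuscated
# alias `a__`, so the names below describe intent rather than callable symbols):
# - primality check: 97 -> True, 1 -> False
# - prime factorization: 60 -> [2, 2, 3, 5]
# - gcd / lcm pair: gcd(24, 36) == 12, kgV(24, 36) == 72
# - the Fibonacci helper is 1-indexed over 1, 2, 3, 5, 8, ... so n = 5 -> 8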
| 71 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Optional[int] =DebertaVaTokenizer
__A : Union[str, Any] =DebertaVaTokenizerFast
__A : str =True
__A : List[str] =True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(_snake_case ,unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[Any] = "this is a test"
UpperCAmelCase_ : Optional[Any] = "this is a test"
return input_text, output_text
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = "<pad>"
UpperCAmelCase_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ) ,_snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ) ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<pad>" )
self.assertEqual(vocab_keys[1] ,"<unk>" )
self.assertEqual(vocab_keys[-1] ,"[PAD]" )
self.assertEqual(len(_snake_case ) ,3_00_01 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : str = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : Union[str, Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCAmelCase_ : Tuple = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Dict = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[Any] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = DebertaVaTokenizerFast(_snake_case ,do_lower_case=_snake_case ,split_by_punct=_snake_case )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case ) )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Tuple = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
UpperCAmelCase_ : int = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = "This is a test"
UpperCAmelCase_ : Optional[int] = [13, 1, 43_98, 25, 21, 12_89]
UpperCAmelCase_ : Optional[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : str = DebertaVaTokenizer(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(_snake_case ,keep_accents=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
# fmt: off
UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
UpperCAmelCase_ : str = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase_ : List[str] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : List[str] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Dict = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : Any = rust_tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = DebertaVaTokenizer(_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" )
UpperCAmelCase_ : Dict = tokenizer.encode("multi-sequence build" )
UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(_snake_case )
UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_snake_case ,_snake_case )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,_snake_case )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,_snake_case ,)
@slow
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
| 71 | 1 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =WavaVecaPhonemeCTCTokenizer
__A : Tuple =False
def UpperCamelCase__ ( self ):
super().setUp()
UpperCAmelCase_ : Optional[int] = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
UpperCAmelCase_ : int = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
UpperCAmelCase_ : Optional[Any] = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(_snake_case ) + "\n" )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=False ,_snake_case=20 ,_snake_case=5 ):
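# Helper: collect (token_id, decoded_text) pairs that round-trip through `encode`,
# trim them to `max_length` and duplicate up to `min_length` before joining.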
UpperCAmelCase_ : str = [(i, tokenizer.decode([i] ,clean_up_tokenization_spaces=_snake_case )) for i in range(len(_snake_case ) )]
UpperCAmelCase_ : Any = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] ,do_phonemize=_snake_case ) ,_snake_case ) )
if max_length is not None and len(_snake_case ) > max_length:
UpperCAmelCase_ : Optional[int] = toks[:max_length]
if min_length is not None and len(_snake_case ) < min_length and len(_snake_case ) > 0:
while len(_snake_case ) < min_length:
UpperCAmelCase_ : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
UpperCAmelCase_ : str = [t[0] for t in toks]
# Ensure consistency
UpperCAmelCase_ : Dict = tokenizer.decode(_snake_case ,clean_up_tokenization_spaces=_snake_case )
if " " not in output_txt and len(_snake_case ) > 1:
UpperCAmelCase_ : Tuple = (
tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=_snake_case )
+ " "
+ tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=_snake_case )
)
if with_prefix_space:
UpperCAmelCase_ : Any = " " + output_txt
UpperCAmelCase_ : Dict = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
return output_txt, output_ids
def UpperCamelCase__ ( self ,**_snake_case ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
UpperCAmelCase_ : List[str] = tokenizer("m xxx ɪ" ,do_phonemize=_snake_case ).input_ids
self.assertEqual(_snake_case ,[13, 3_92, 17] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
UpperCAmelCase_ : List[Any] = tokenizer("m aaa ɪ ccc" ,do_phonemize=_snake_case ).input_ids
self.assertEqual(_snake_case ,[13, 3_93, 17, 3_95] ) # aaa and ccc should come after xxx; ccc is 2 ids after aaa
UpperCAmelCase_ : int = tokenizer("maɪ c" ,do_phonemize=_snake_case ).input_ids
self.assertEqual(_snake_case ,[3, 2_00] ) # mai should be <unk> (=3)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase_ : Optional[Any] = "Hello how are you"
UpperCAmelCase_ : Dict = tokenizer.phonemize(_snake_case ,phonemizer_lang="en-us" )
self.assertEqual(_snake_case ,"h ə l oʊ h aʊ ɑːɹ j uː" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase_ : Optional[int] = "Hello how are you"
UpperCAmelCase_ : int = tokenizer.phonemize(_snake_case ,phonemizer_lang="en-us" )
self.assertEqual(tokenizer(_snake_case ).input_ids ,tokenizer(_snake_case ,do_phonemize=_snake_case ).input_ids )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase_ : Tuple = "Hello how are you"
UpperCAmelCase_ : Optional[Any] = tokenizer.phonemize(_snake_case ,phonemizer_lang="en-us" )
UpperCAmelCase_ : int = tokenizer.decode(tokenizer(_snake_case ).input_ids )
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase_ : Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
UpperCAmelCase_ : Tuple = tokenizer.decode(sample_ids[0] )
UpperCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(_snake_case )
self.assertEqual(_snake_case ,batch_tokens[0] )
self.assertEqual(_snake_case ,["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" ,word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase_ : Union[str, Any] = "Hello how are you"
UpperCAmelCase_ : Tuple = tokenizer.phonemize(_snake_case ,phonemizer_lang="en-us" )
self.assertEqual(_snake_case ,"h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" ,word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase_ : Union[str, Any] = "Hello how are you"
UpperCAmelCase_ : int = tokenizer.phonemize(_snake_case ,phonemizer_lang="en-us" )
self.assertEqual(tokenizer(_snake_case ).input_ids ,tokenizer(_snake_case ,do_phonemize=_snake_case ).input_ids )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" ,word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
UpperCAmelCase_ : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
UpperCAmelCase_ : str = tokenizer.decode(sample_ids[0] )
UpperCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(_snake_case )
self.assertEqual(_snake_case ,batch_tokens[0] )
self.assertEqual(_snake_case ,["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
UpperCAmelCase_ : int = tokenizer.decode(sample_ids[0] ,filter_word_delimiter_token=_snake_case )
UpperCAmelCase_ : str = tokenizer.batch_decode(_snake_case ,filter_word_delimiter_token=_snake_case )
self.assertEqual(_snake_case ,batch_tokens[0] )
self.assertEqual(_snake_case ,["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" ,word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase_ : int = "Hello how are you"
UpperCAmelCase_ : Tuple = tokenizer.phonemize(_snake_case ,phonemizer_lang="en-us" )
UpperCAmelCase_ : Any = tokenizer.decode(tokenizer(_snake_case ).input_ids ,filter_word_delimiter_token=_snake_case )
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" ,word_delimiter_token="|" )
tokenizer.add_tokens("|" )
UpperCAmelCase_ : Optional[Any] = "Hello how are you"
UpperCAmelCase_ : Union[str, Any] = tokenizer.phonemize(_snake_case ,phonemizer_lang="en-us" )
UpperCAmelCase_ : Dict = tokenizer.decode(tokenizer(_snake_case ).input_ids ,filter_word_delimiter_token=_snake_case )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" ,word_delimiter_token=_snake_case )
UpperCAmelCase_ : Dict = "Hello how are you"
UpperCAmelCase_ : List[Any] = tokenizer(_snake_case ,phonemizer_lang="en-us" ).input_ids
UpperCAmelCase_ : Optional[Any] = tokenizer(_snake_case ,phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : str = tokenizer.decode(_snake_case )
UpperCAmelCase_ : str = tokenizer.decode(_snake_case )
self.assertEqual(_snake_case ,"h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(_snake_case ,"ɛ l o h aʊ a ʁ j u" )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
UpperCAmelCase_ : Optional[Any] = "Hello how Are you"
UpperCAmelCase_ : Optional[int] = "hello how are you"
UpperCAmelCase_ : Dict = tokenizer(_snake_case ).input_ids
UpperCAmelCase_ : List[str] = tokenizer(_snake_case ).input_ids
self.assertEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
UpperCAmelCase_ : Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
UpperCAmelCase_ : List[Any] = tokenizer.batch_decode(_snake_case )
self.assertEqual(_snake_case ,["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
def UpperCamelCase__ ( _snake_case ,_snake_case ):
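# Pull one field (e.g. "char", "start_offset" or "end_offset") out of a list of offset dicts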
UpperCAmelCase_ : Optional[int] = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = self.get_tokenizer(word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
UpperCAmelCase_ : Optional[int] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
UpperCAmelCase_ : Tuple = tokenizer.decode(_snake_case ,output_char_offsets=_snake_case ,filter_word_delimiter_token=_snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) ,2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(_snake_case ,_snake_case ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] ,"char" ) ) ,outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] ,"char" ) ,["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] ,"start_offset" ) ,[0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] ,"end_offset" ) ,[1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.get_tokenizer(word_delimiter_token="|" )
def check_list_tuples_equal(_snake_case ,_snake_case ):
self.assertTrue(isinstance(_snake_case ,_snake_case ) )
self.assertTrue(isinstance(outputs_list[0] ,_snake_case ) )
# transform list to ModelOutput
UpperCAmelCase_ : List[str] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["text"] ,outputs_batch_a["text"] )
def recursive_check(_snake_case ,_snake_case ):
if isinstance(_snake_case ,_snake_case ):
[recursive_check(_snake_case ,_snake_case ) for la, la in zip(_snake_case ,_snake_case )]
self.assertEqual(_snake_case ,_snake_case )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] ,outputs_batch_a["char_offsets"] )
# fmt: off
UpperCAmelCase_ : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
UpperCAmelCase_ : List[Any] = tokenizer.batch_decode(_snake_case ,output_char_offsets=_snake_case )
UpperCAmelCase_ : Dict = [tokenizer.decode(_snake_case ,output_char_offsets=_snake_case ) for ids in sample_ids]
check_list_tuples_equal(_snake_case ,_snake_case )
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase_ : Any = tokenizer.vocab_size
UpperCAmelCase_ : str = len(_snake_case )
self.assertNotEqual(_snake_case ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase_ : str = ["aaaaa bbbbbb", "cccccccccdddddddd"]
UpperCAmelCase_ : Optional[int] = tokenizer.add_tokens(_snake_case )
UpperCAmelCase_ : Any = tokenizer.vocab_size
UpperCAmelCase_ : int = len(_snake_case )
self.assertNotEqual(_snake_case ,0 )
self.assertEqual(_snake_case ,_snake_case )
self.assertEqual(_snake_case ,len(_snake_case ) )
self.assertEqual(_snake_case ,all_size + len(_snake_case ) )
UpperCAmelCase_ : List[Any] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" ,add_special_tokens=_snake_case )
self.assertGreaterEqual(len(_snake_case ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
UpperCAmelCase_ : str = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
UpperCAmelCase_ : str = tokenizer.add_special_tokens(_snake_case )
UpperCAmelCase_ : Union[str, Any] = tokenizer.vocab_size
UpperCAmelCase_ : Tuple = len(_snake_case )
self.assertNotEqual(_snake_case ,0 )
self.assertEqual(_snake_case ,_snake_case )
self.assertEqual(_snake_case ,len(_snake_case ) )
self.assertEqual(_snake_case ,all_size_a + len(_snake_case ) )
UpperCAmelCase_ : Dict = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" ,add_special_tokens=_snake_case )
self.assertGreaterEqual(len(_snake_case ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string, which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizers(fast=_snake_case ,do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase_ : Union[str, Any] = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
UpperCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_string(_snake_case )
self.assertIsInstance(output["text"] ,_snake_case )
| 71 |
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError("Input value must be an 'int' type" )
UpperCAmelCase_ : Union[str, Any] = 0
while number:
position += 1
number >>= 1
return position
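# Usage note: the loop mirrors int.bit_length, returning the 1-based position of the
# most significant set bit, e.g. 1 -> 1 and 32 -> 6 (since 32 == 0b100000).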
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case (unittest.TestCase):
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ,return_dict=_snake_case ).to(_snake_case )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ : int = tokenizer("Hello there" ,return_tensors="pt" ).input_ids
UpperCAmelCase_ : str = tokenizer("Hi I am" ,return_tensors="pt" ).input_ids
UpperCAmelCase_ : Optional[int] = model(input_ids.to(_snake_case ) ,labels=labels.to(_snake_case ) ).loss
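# the returned loss is the mean cross-entropy per label token; scaling by the label
# length recovers the total sequence log-likelihood used for the reference mtf score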
UpperCAmelCase_ : Any = -(labels.shape[-1] * loss.item())
UpperCAmelCase_ : Tuple = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 71 |
'''simple docstring'''
from math import factorial
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(_SCREAMING_SNAKE_CASE ) // (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k ))
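# Worked example: combinations(52, 5) == 52! / (5! * 47!) == 2_598_960, matching the
# poker-hand count printed below.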
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 71 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple ="longformer"
def __init__( self ,_snake_case = 5_12 ,_snake_case = 2 ,_snake_case = 1 ,_snake_case = 0 ,_snake_case = 2 ,_snake_case = 3_05_22 ,_snake_case = 7_68 ,_snake_case = 12 ,_snake_case = 12 ,_snake_case = 30_72 ,_snake_case = "gelu" ,_snake_case = 0.1 ,_snake_case = 0.1 ,_snake_case = 5_12 ,_snake_case = 2 ,_snake_case = 0.02 ,_snake_case = 1E-12 ,_snake_case = False ,**_snake_case ,):
super().__init__(pad_token_id=_snake_case ,**_snake_case )
UpperCAmelCase_ : Union[str, Any] = attention_window
UpperCAmelCase_ : str = sep_token_id
UpperCAmelCase_ : List[str] = bos_token_id
UpperCAmelCase_ : Optional[Any] = eos_token_id
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = type_vocab_size
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : Optional[int] = onnx_export
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case = "default" ,_snake_case = None ):
super().__init__(_snake_case ,_snake_case ,_snake_case )
UpperCAmelCase_ : int = True
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ : Union[str, Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = super().outputs
if self.task == "default":
UpperCAmelCase_ : str = {0: "batch"}
return outputs
@property
def UpperCamelCase__ ( self ):
return 1E-4
@property
def UpperCamelCase__ ( self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset ,14 )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = -1 ,_snake_case = -1 ,_snake_case = False ,_snake_case = None ,):
UpperCAmelCase_ : Any = super().generate_dummy_inputs(
preprocessor=_snake_case ,batch_size=_snake_case ,seq_length=_snake_case ,is_pair=_snake_case ,framework=_snake_case )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
# make every second token global
inputs["global_attention_mask"][:, ::2] = 1
return inputs
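# Usage sketch (class names assumed from the surrounding library, not defined in
# this snippet):
#   onnx_config = LongformerOnnxConfig(LongformerConfig(), task="default")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
# yields input_ids, attention_mask and the global_attention_mask built above.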
| 71 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =VideoToVideoSDPipeline
__A : Tuple =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
__A : Union[str, Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
__A : str =PipelineTesterMixin.required_optional_params - {"latents"}
__A : Dict =False
# No `output_type`.
__A : Optional[int] =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
UpperCAmelCase_ : int = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="gelu" ,projection_dim=5_12 ,)
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_snake_case )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
# 3 frames
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 3, 32, 32) ,rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Tuple = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.get_dummy_components()
UpperCAmelCase_ : str = VideoToVideoSDPipeline(**_snake_case )
UpperCAmelCase_ : int = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = "np"
UpperCAmelCase_ : Dict = sd_pipe(**_snake_case ).frames
UpperCAmelCase_ : Tuple = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
UpperCAmelCase_ : Dict = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case ,expected_max_diff=5E-3 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" ,torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : int = torch.randn((1, 10, 3, 10_24, 5_76) ,generator=_snake_case )
UpperCAmelCase_ : List[Any] = video.to("cuda" )
UpperCAmelCase_ : List[Any] = "Spiderman is surfing"
UpperCAmelCase_ : Optional[Any] = pipe(_snake_case ,video=_snake_case ,generator=_snake_case ,num_inference_steps=3 ,output_type="pt" ).frames
UpperCAmelCase_ : Any = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 71 | 1 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase = 16
_lowerCamelCase = 32
def a__ ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase_ : Optional[Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(_SCREAMING_SNAKE_CASE : str ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ : Dict = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Any = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want padded lengths that are round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ : str = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ : Union[str, Any] = 8
else:
UpperCAmelCase_ : Union[str, Any] = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE , padding="longest" , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase_ : str = DataLoader(
tokenized_datasets["train"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , drop_last=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , drop_last=(accelerator.mixed_precision == "fp8") , )
return train_dataloader, eval_dataloader
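# Usage sketch: `training_function` below consumes both loaders, roughly
#   train_dataloader, eval_dataloader = get_dataloaders(Accelerator(), batch_size=16)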
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : str = config["lr"]
UpperCAmelCase_ : Any = int(config["num_epochs"] )
UpperCAmelCase_ : Any = int(config["seed"] )
UpperCAmelCase_ : Optional[int] = int(config["batch_size"] )
UpperCAmelCase_ : Union[str, Any] = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase_ : Dict = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase_ : int = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase_ : Tuple = MAX_GPU_BATCH_SIZE
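# e.g. batch_size=64 becomes 4 accumulation steps of 16, preserving the effective batch size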
set_seed(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ : Optional[int] = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
# Instantiate scheduler
UpperCAmelCase_ : Any = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase_ : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = outputs.loss
UpperCAmelCase_ : str = loss / gradient_accumulation_steps
accelerator.backward(_SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : str = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _SCREAMING_SNAKE_CASE )
def a__ ( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCAmelCase_ : Any = parser.parse_args()
UpperCAmelCase_ : Optional[Any] = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 71 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_lowerCamelCase = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Tuple:
"""simple docstring"""
inspect_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
"""simple docstring"""
inspect_metric(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = path + ".py"
assert script_name in os.listdir(_SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ) -> str:
"""simple docstring"""
UpperCAmelCase_ : int = get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_config_info(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_config_names(_SCREAMING_SNAKE_CASE )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Any = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ : Optional[Any] = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_dataset_infos(_SCREAMING_SNAKE_CASE )
assert expected_config in infos
UpperCAmelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> Any:
"""simple docstring"""
with pytest.raises(_SCREAMING_SNAKE_CASE ):
get_dataset_split_names(_SCREAMING_SNAKE_CASE , config_name=_SCREAMING_SNAKE_CASE )
| 71 | 1 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : float = 0.0 , _SCREAMING_SNAKE_CASE : float = 1.0 ) -> int:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
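# Sanity check: the peak value at x == mu is 1 / sqrt(2 * pi * sigma**2); for the
# defaults mu=0.0, sigma=1.0 that is 1 / sqrt(2 * pi) ≈ 0.3989422804014327.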
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : List[str] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCAmelCase_ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
UpperCAmelCase_ : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCAmelCase_ : Dict = {"unk_token": "<unk>"}
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(_snake_case ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(_snake_case ) )
UpperCAmelCase_ : Optional[Any] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,_snake_case )
with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp:
json.dump(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
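# Build a single random 30x400 RGB PIL image (channels-first array moved to channels-last)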
UpperCAmelCase_ : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
UpperCAmelCase_ : Union[str, Any] = [Image.fromarray(np.moveaxis(_snake_case ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : str = self.get_rust_tokenizer()
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=_snake_case )
UpperCAmelCase_ : str = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,_snake_case )
self.assertIsInstance(processor_fast.tokenizer ,_snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,_snake_case )
self.assertIsInstance(processor_fast.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
UpperCAmelCase_ : Tuple = self.get_image_processor(do_normalize=_snake_case ,padding_value=1.0 )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=_snake_case ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Dict = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[int] = image_processor(_snake_case ,return_tensors="np" )
UpperCAmelCase_ : Any = processor(images=_snake_case ,return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Tuple = "lower newer"
UpperCAmelCase_ : Any = processor(text=_snake_case )
UpperCAmelCase_ : List[Any] = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = "lower newer"
UpperCAmelCase_ : List[str] = self.prepare_image_inputs()
UpperCAmelCase_ : str = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : int = processor.batch_decode(_snake_case )
UpperCAmelCase_ : int = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Optional[int] = "lower newer"
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Dict = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 71 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : List[str] ="gpt_neox"
def __init__( self ,_snake_case=5_04_32 ,_snake_case=61_44 ,_snake_case=44 ,_snake_case=64 ,_snake_case=2_45_76 ,_snake_case="gelu" ,_snake_case=0.25 ,_snake_case=1_00_00 ,_snake_case=0.0 ,_snake_case=0.0 ,_snake_case=0.1 ,_snake_case=20_48 ,_snake_case=0.02 ,_snake_case=1E-5 ,_snake_case=True ,_snake_case=0 ,_snake_case=2 ,_snake_case=False ,_snake_case=True ,_snake_case=None ,**_snake_case ,):
super().__init__(bos_token_id=_snake_case ,eos_token_id=_snake_case ,**_snake_case )
UpperCAmelCase_ : int = vocab_size
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : Optional[Any] = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Tuple = rotary_pct
UpperCAmelCase_ : Optional[int] = rotary_emb_base
UpperCAmelCase_ : Optional[int] = attention_dropout
UpperCAmelCase_ : Dict = hidden_dropout
UpperCAmelCase_ : List[str] = classifier_dropout
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : Optional[int] = use_cache
UpperCAmelCase_ : Optional[Any] = tie_word_embeddings
UpperCAmelCase_ : List[Any] = use_parallel_residual
UpperCAmelCase_ : List[str] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def UpperCamelCase__ ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling ,_snake_case ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'''got {self.rope_scaling}''' )
UpperCAmelCase_ : List[Any] = self.rope_scaling.get("type" ,_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.rope_scaling.get("factor" ,_snake_case )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(_snake_case ,_snake_case ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
| 71 |
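The `_rope_scaling_validation` method above only accepts a two-key dict. A minimal standalone sketch of the same checks (the helper name and example dict are illustrative, not part of the snippet):

def validate_rope_scaling(rope_scaling):
    # Mirrors the checks in _rope_scaling_validation above.
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with `type` and `factor`, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    scaling_factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be 'linear' or 'dynamic', got {scaling_type}")
    if not isinstance(scaling_factor, float) or scaling_factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {scaling_factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently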
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Any =AudioLDMPipeline
__A : Dict =TEXT_TO_AUDIO_PARAMS
__A : Any =TEXT_TO_AUDIO_BATCH_PARAMS
__A : Tuple =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
])
def UpperCamelCase__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=(32, 64) ,class_embed_type="simple_projection" ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=_snake_case ,)
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=_snake_case ,set_alpha_to_one=_snake_case ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = ClapTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,projection_dim=32 ,)
UpperCAmelCase_ : Optional[Any] = ClapTextModelWithProjection(_snake_case )
UpperCAmelCase_ : List[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" ,model_max_length=77 )
UpperCAmelCase_ : Optional[int] = SpeechTaHifiGanConfig(
model_in_dim=8 ,sampling_rate=1_60_00 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=_snake_case ,)
UpperCAmelCase_ : Union[str, Any] = SpeechTaHifiGan(_snake_case )
UpperCAmelCase_ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def UpperCamelCase__ ( self ,_snake_case ,_snake_case=0 ):
if str(_snake_case ).startswith("mps" ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_snake_case )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : Any = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : Any = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : int = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Dict = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Tuple = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Any = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[str] = output.audios[0]
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : str = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : str = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : Dict = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Tuple = F.normalize(_snake_case ,dim=-1 )
UpperCAmelCase_ : int = prompt_embeds
# forward
UpperCAmelCase_ : int = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : List[Any] = output.audios[0]
assert np.abs(audio_1 - audio_2 ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.get_dummy_components()
UpperCAmelCase_ : Tuple = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = 3 * ["this is a negative prompt"]
UpperCAmelCase_ : Any = negative_prompt
UpperCAmelCase_ : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
UpperCAmelCase_ : Dict = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Dict = output.audios[0]
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = 3 * [inputs.pop("prompt" )]
UpperCAmelCase_ : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ : Any = audioldm_pipe.tokenizer(
_snake_case ,padding="max_length" ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=_snake_case ,return_tensors="pt" ,)
UpperCAmelCase_ : List[Any] = text_inputs["input_ids"].to(_snake_case )
UpperCAmelCase_ : str = audioldm_pipe.text_encoder(
_snake_case ,)
UpperCAmelCase_ : List[Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Any = F.normalize(_snake_case ,dim=-1 )
embeds.append(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = embeds
# forward
UpperCAmelCase_ : Tuple = audioldm_pipe(**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert np.abs(audio_1 - audio_2 ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : int = "egg cracking"
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ,negative_prompt=_snake_case )
UpperCAmelCase_ : int = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 2_56
UpperCAmelCase_ : List[Any] = audio[:10]
UpperCAmelCase_ : Any = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=_snake_case )
UpperCAmelCase_ : Any = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : Any = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Dict = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase_ : Any = audioldm_pipe(_snake_case ,num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Dict = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : List[Any] = audioldm_pipe(_snake_case ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = audioldm_pipe(
[prompt] * batch_size ,num_inference_steps=2 ,num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : List[Any] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase_ : Any = self.get_dummy_inputs(_snake_case )
UpperCAmelCase_ : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 ,**_snake_case )
UpperCAmelCase_ : str = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
UpperCAmelCase_ : List[Any] = audioldm_pipe(audio_length_in_s=0.032 ,**_snake_case )
UpperCAmelCase_ : Any = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : str = AudioLDMPipeline(**_snake_case )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : int = ["hey"]
UpperCAmelCase_ : Dict = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : Any = output.audios.shape
assert audio_shape == (1, 2_56)
UpperCAmelCase_ : Tuple = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase_ : List[Any] = SpeechTaHifiGan(_snake_case ).to(_snake_case )
UpperCAmelCase_ : Tuple = audioldm_pipe(_snake_case ,num_inference_steps=1 )
UpperCAmelCase_ : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def UpperCamelCase__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def UpperCamelCase__ ( self ):
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCamelCase__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ,_snake_case ,_snake_case="cpu" ,_snake_case=torch.floataa ,_snake_case=0 ):
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
UpperCAmelCase_ : str = np.random.RandomState(_snake_case ).standard_normal((1, 8, 1_28, 16) )
UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_snake_case ).to(device=_snake_case ,dtype=_snake_case )
UpperCAmelCase_ : List[str] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : Optional[int] = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : List[Any] = self.get_inputs(_snake_case )
UpperCAmelCase_ : List[Any] = 25
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Union[str, Any] = audio[7_72_30:7_72_40]
UpperCAmelCase_ : Any = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
UpperCAmelCase_ : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
UpperCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase_ : int = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Tuple = self.get_inputs(_snake_case )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 8_19_20
UpperCAmelCase_ : Any = audio[2_77_80:2_77_90]
UpperCAmelCase_ : List[str] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
UpperCAmelCase_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 71 | 1 |
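The prompt-embedding tests above tokenize the prompt, run the CLAP text encoder, and L2-normalize the projected embeddings before feeding them back as `prompt_embeds`. A minimal sketch of that normalization step with plain tensors (shapes are illustrative):

import torch
import torch.nn.functional as F

# Stand-in for `prompt_embeds.text_embeds` returned by the text encoder.
text_embeds = torch.randn(3, 32)

# L2-normalize each embedding along the hidden dimension, as in the tests above.
normalized = F.normalize(text_embeds, dim=-1)
assert torch.allclose(normalized.norm(dim=-1), torch.ones(3), atol=1e-5)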
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCamelCase = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 71 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 71 | 1 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
_lowerCamelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]:
"""simple docstring"""
config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = tmp_path_factory.getbasetemp() / "cache"
UpperCAmelCase_ : int = test_hf_cache_home / "datasets"
UpperCAmelCase_ : List[Any] = test_hf_cache_home / "metrics"
UpperCAmelCase_ : str = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(_SCREAMING_SNAKE_CASE ) )
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(_SCREAMING_SNAKE_CASE ) )
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : int = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Tuple = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_SCREAMING_SNAKE_CASE ) )
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope="session" )
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
"""simple docstring"""
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , _SCREAMING_SNAKE_CASE )
@pytest.fixture
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
"""simple docstring"""
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , _SCREAMING_SNAKE_CASE )
| 71 |
'''simple docstring'''
import heapq
def a__ ( _SCREAMING_SNAKE_CASE : dict ) -> set[int]:
"""simple docstring"""
UpperCAmelCase_ : list[list] = []
# for each node and its adjacency list, add the node and its rank to the queue
# using the heapq module, the queue is filled like a priority queue
# heapq implements a min-heap, so the rank is stored as -1 * len(v) to simulate a max-heap
for key, value in graph.items():
# O(log(n))
heapq.heappush(_SCREAMING_SNAKE_CASE , [-1 * len(_SCREAMING_SNAKE_CASE ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase_ : Optional[int] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase_ : Tuple = heapq.heappop(_SCREAMING_SNAKE_CASE )[1][0]
chosen_vertices.add(_SCREAMING_SNAKE_CASE )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem,
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
UpperCAmelCase_ : Any = elem[1][1].index(_SCREAMING_SNAKE_CASE )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_SCREAMING_SNAKE_CASE )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 71 | 1 |
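A quick sanity check for the greedy cover above: every edge of the example graph must have at least one endpoint in the returned set (the adjacency lists are copied first, since the algorithm mutates them):

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = greedy_min_vertex_cover({k: list(v) for k, v in graph.items()})
assert all(u in cover or v in cover for u in graph for v in graph[u])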
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,*_snake_case ,**_snake_case ):
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." ,_snake_case ,)
super().__init__(*_snake_case ,**_snake_case )
| 71 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowerCamelCase = logging.get_logger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE : Tuple ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Tuple =["pixel_values"]
def __init__( self ,_snake_case = True ,_snake_case = None ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = True ,_snake_case = None ,_snake_case = True ,_snake_case = 1 / 2_55 ,_snake_case = True ,_snake_case = True ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase_ : List[str] = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : str = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase_ : Optional[Any] = get_size_dict(_snake_case ,param_name="crop_size" )
UpperCAmelCase_ : int = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Optional[Any] = crop_size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : int = do_rescale
UpperCAmelCase_ : Optional[int] = rescale_factor
UpperCAmelCase_ : Dict = offset
UpperCAmelCase_ : Optional[Any] = do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = PILImageResampling.BILINEAR ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Any = get_size_dict(_snake_case ,default_to_square=_snake_case )
if "shortest_edge" in size:
UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(_snake_case ,size["shortest_edge"] ,default_to_square=_snake_case )
elif "height" in size and "width" in size:
UpperCAmelCase_ : Optional[Any] = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(_snake_case ,size=_snake_case ,resample=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : Dict = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(_snake_case ,size=(size["height"], size["width"]) ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = True ,_snake_case = None ,**_snake_case ,):
UpperCAmelCase_ : int = image.astype(np.floataa )
if offset:
UpperCAmelCase_ : Any = image - (scale / 2)
return rescale(_snake_case ,scale=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case = None ,**_snake_case ,):
return normalize(_snake_case ,mean=_snake_case ,std=_snake_case ,data_format=_snake_case ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,):
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Optional[int] = to_numpy_array(_snake_case )
if do_resize:
UpperCAmelCase_ : Dict = self.resize(image=_snake_case ,size=_snake_case ,resample=_snake_case )
if do_center_crop:
UpperCAmelCase_ : Optional[Any] = self.center_crop(_snake_case ,size=_snake_case )
if do_rescale:
UpperCAmelCase_ : Union[str, Any] = self.rescale(image=_snake_case ,scale=_snake_case ,offset=_snake_case )
if do_normalize:
UpperCAmelCase_ : Any = self.normalize(image=_snake_case ,mean=_snake_case ,std=_snake_case )
UpperCAmelCase_ : Any = to_channel_dimension_format(_snake_case ,_snake_case )
return image
def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = ChannelDimension.FIRST ,**_snake_case ,):
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : str = resample if resample is not None else self.resample
UpperCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : List[Any] = offset if offset is not None else self.offset
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : int = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Dict = size if size is not None else self.size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,default_to_square=_snake_case )
UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : int = get_size_dict(_snake_case ,param_name="crop_size" )
if not valid_images(_snake_case ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ : Any = make_batched(_snake_case )
UpperCAmelCase_ : Dict = [
[
self._preprocess_image(
image=_snake_case ,do_resize=_snake_case ,size=_snake_case ,resample=_snake_case ,do_center_crop=_snake_case ,crop_size=_snake_case ,do_rescale=_snake_case ,rescale_factor=_snake_case ,offset=_snake_case ,do_normalize=_snake_case ,image_mean=_snake_case ,image_std=_snake_case ,data_format=_snake_case ,)
for img in video
]
for video in videos
]
UpperCAmelCase_ : List[str] = {"pixel_values": videos}
return BatchFeature(data=_snake_case ,tensor_type=_snake_case )
| 71 | 1 |
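The `make_batched` helper above normalizes the three accepted input shapes to a list of videos, each video being a list of frames. A small illustration with NumPy frames (shapes are illustrative):

import numpy as np

frame = np.zeros((224, 224, 3), dtype=np.uint8)

single = make_batched(frame)               # single image   -> [[frame]]
video = make_batched([frame, frame])       # list of images -> [[frame, frame]]
batch = make_batched([[frame], [frame]])   # already batched -> unchanged

assert len(single) == 1 and len(single[0]) == 1
assert len(video) == 1 and len(video[0]) == 2
assert len(batch) == 2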
'''simple docstring'''
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Dict =PhobertTokenizer
__A : Tuple =False
def UpperCamelCase__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : List[str] = ["T@@", "i", "I", "R@@", "r", "e@@"]
UpperCAmelCase_ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
UpperCAmelCase_ : Tuple = ["#version: 0.2", "l à</w>"]
UpperCAmelCase_ : List[Any] = {"unk_token": "<unk>"}
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(_snake_case ) )
def UpperCamelCase__ ( self ,**_snake_case ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : Tuple = "Tôi là VinAI Research"
UpperCAmelCase_ : Any = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = PhobertTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
UpperCAmelCase_ : List[str] = "Tôi là VinAI Research"
UpperCAmelCase_ : Tuple = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_snake_case )
print(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
UpperCAmelCase_ : List[str] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : Any = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) ,_snake_case )
| 71 |
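In the fixture above, a trailing `@@` marks a subword that continues into the next piece (the fastBPE convention PhoBERT uses), so `T@@ ô@@ i` recombines to `Tôi`. A tiny sketch of that detokenization rule:

def merge_bpe_tokens(tokens):
    # Join fastBPE subwords: a trailing '@@' glues a piece to the next one.
    return " ".join(tokens).replace("@@ ", "")

assert merge_bpe_tokens(["T@@", "ô@@", "i", "l@@", "à"]) == "Tôi là"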
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _snake_case (__SCREAMING_SNAKE_CASE):
def __init__( self ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,_snake_case = False ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
super().__init__(
_snake_case ,split=_snake_case ,features=_snake_case ,cache_dir=_snake_case ,keep_in_memory=_snake_case ,streaming=_snake_case ,num_proc=_snake_case ,**_snake_case ,)
UpperCAmelCase_ : Tuple = field
UpperCAmelCase_ : List[Any] = path_or_paths if isinstance(_snake_case ,_snake_case ) else {self.split: path_or_paths}
UpperCAmelCase_ : Optional[int] = Json(
cache_dir=_snake_case ,data_files=_snake_case ,features=_snake_case ,field=_snake_case ,**_snake_case ,)
def UpperCamelCase__ ( self ):
# Build iterable dataset
if self.streaming:
UpperCAmelCase_ : List[str] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : int = None
self.builder.download_and_prepare(
download_config=_snake_case ,download_mode=_snake_case ,verification_mode=_snake_case ,base_path=_snake_case ,num_proc=self.num_proc ,)
UpperCAmelCase_ : Dict = self.builder.as_dataset(
split=self.split ,verification_mode=_snake_case ,in_memory=self.keep_in_memory )
return dataset
class _snake_case :
def __init__( self ,_snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,**_snake_case ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Union[str, Any] = path_or_buf
UpperCAmelCase_ : str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCAmelCase_ : Dict = num_proc
UpperCAmelCase_ : Optional[Any] = "utf-8"
UpperCAmelCase_ : Optional[int] = to_json_kwargs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.to_json_kwargs.pop("path_or_buf" ,_snake_case )
UpperCAmelCase_ : Tuple = self.to_json_kwargs.pop("orient" ,"records" )
UpperCAmelCase_ : Any = self.to_json_kwargs.pop("lines" ,True if orient == "records" else False )
UpperCAmelCase_ : Optional[int] = self.to_json_kwargs.pop("index" ,False if orient in ["split", "table"] else True )
UpperCAmelCase_ : int = self.to_json_kwargs.pop("compression" ,_snake_case )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"wb" ,compression=_snake_case ) as buffer:
UpperCAmelCase_ : List[str] = self._write(file_obj=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
" was passed. Please provide a local path instead." )
UpperCAmelCase_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**self.to_json_kwargs )
return written
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = args
UpperCAmelCase_ : List[str] = query_table(
table=self.dataset.data ,key=slice(_snake_case ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
UpperCAmelCase_ : Optional[Any] = batch.to_pandas().to_json(
path_or_buf=_snake_case ,orient=_snake_case ,lines=_snake_case ,index=_snake_case ,**_snake_case )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ,):
UpperCAmelCase_ : Optional[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
UpperCAmelCase_ : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_snake_case )
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,_snake_case ,_snake_case )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="ba" ,disable=not logging.is_progress_bar_enabled() ,desc="Creating json from Arrow format" ,):
written += file_obj.write(_snake_case )
return written
| 71 | 1 |
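The `_write` path above serializes the dataset in `batch_size` slices, each converted to pandas and appended as JSON Lines. A stripped-down sketch of that batching loop, without multiprocessing (names are illustrative):

import io

import pandas as pd

def write_json_lines(rows, file_obj, batch_size=2):
    # Write dicts as JSON Lines in fixed-size batches, like _batch_json/_write above.
    written = 0
    for offset in range(0, len(rows), batch_size):
        batch = pd.DataFrame(rows[offset : offset + batch_size])
        json_str = batch.to_json(orient="records", lines=True)
        if not json_str.endswith("\n"):
            json_str += "\n"
        written += file_obj.write(json_str.encode("utf-8"))
    return written

buffer = io.BytesIO()
write_json_lines([{"a": 1}, {"a": 2}, {"a": 3}], buffer)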
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_lowerCamelCase = logging.get_logger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str]=False ) -> List[str]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCAmelCase_ : Dict = os.path.abspath(_SCREAMING_SNAKE_CASE )
logger.info(F'''Loading PyTorch weights from {pt_path}''' )
UpperCAmelCase_ : List[str] = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" )
logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
UpperCAmelCase_ : str = convert_pytorch_state_dict_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCAmelCase_ : List[str] = convert_pytorch_sharded_state_dict_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return flax_state_dict
def a__ ( _SCREAMING_SNAKE_CASE : Tuple[str] , _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : Dict[str, jnp.ndarray] , _SCREAMING_SNAKE_CASE : str , ) -> (Tuple[str], np.ndarray):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE : Tuple[str] ) -> bool:
return len(set(_SCREAMING_SNAKE_CASE ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCAmelCase_ : List[Any] = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCAmelCase_ : Dict = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCAmelCase_ : Any = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCAmelCase_ : str = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase_ : Tuple = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase_ : Optional[int] = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase_ : Optional[Any] = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase_ : Optional[Any] = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCAmelCase_ : Optional[int] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCAmelCase_ : Union[str, Any] = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCAmelCase_ : str = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCAmelCase_ : int = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase_ : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCAmelCase_ : List[Any] = flax_model.params["params"]
else:
UpperCAmelCase_ : Optional[Any] = flax_model.params
UpperCAmelCase_ : str = flatten_dict(_SCREAMING_SNAKE_CASE )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase_ : Tuple = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = {}
UpperCAmelCase_ : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase_ : str = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase_ : Dict = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCAmelCase_ : List[Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_ : int = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = rename_key_and_reshape_tensor(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# add model prefix if necessary
UpperCAmelCase_ : Any = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_ : Tuple = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCAmelCase_ : str = jnp.asarray(_SCREAMING_SNAKE_CASE )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ : List[Any] = jnp.asarray(_SCREAMING_SNAKE_CASE )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ : Optional[int] = jnp.asarray(_SCREAMING_SNAKE_CASE )
return unflatten_dict(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
"""simple docstring"""
import torch
# Load the index
UpperCAmelCase_ : Optional[Any] = {}
for shard_file in shard_filenames:
# load each shard checkpoint with torch.load
UpperCAmelCase_ : str = torch.load(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase_ : Optional[int] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase_ : List[str] = flax_model.params["params"]
UpperCAmelCase_ : List[Any] = flatten_dict(_SCREAMING_SNAKE_CASE )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCAmelCase_ : Tuple = flax_model.params
UpperCAmelCase_ : Any = flatten_dict(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase_ : List[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase_ : Optional[int] = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCAmelCase_ : Optional[int] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_ : List[Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = rename_key_and_reshape_tensor(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# add model prefix if necessary
UpperCAmelCase_ : Optional[Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_ : Optional[Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCAmelCase_ : Dict = jnp.asarray(_SCREAMING_SNAKE_CASE )
continue
if "var" in flax_key[-1]:
UpperCAmelCase_ : Dict = jnp.asarray(_SCREAMING_SNAKE_CASE )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ : str = jnp.asarray(_SCREAMING_SNAKE_CASE )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase_ : Optional[int] = jnp.asarray(_SCREAMING_SNAKE_CASE )
return unflatten_dict(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = os.path.abspath(_SCREAMING_SNAKE_CASE )
logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
UpperCAmelCase_ : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_SCREAMING_SNAKE_CASE , "rb" ) as state_f:
try:
UpperCAmelCase_ : Union[str, Any] = from_bytes(_SCREAMING_SNAKE_CASE , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ) -> str:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
" instructions." )
raise
# check if we have bf16 weights
UpperCAmelCase_ : List[str] = flatten_dict(jax.tree_util.tree_map(lambda _SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , _SCREAMING_SNAKE_CASE ) ).values()
if any(_SCREAMING_SNAKE_CASE ):
# convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCAmelCase_ : List[Any] = jax.tree_util.tree_map(
lambda _SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = flatten_dict(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = pt_model.state_dict()
UpperCAmelCase_ : Optional[Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCAmelCase_ : Any = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Optional[int] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCAmelCase_ : List[str] = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCAmelCase_ : Dict = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase_ : Union[str, Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase_ : Any = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_SCREAMING_SNAKE_CASE ) not in pt_model_dict:
# conv layer
UpperCAmelCase_ : Dict = flax_key_tuple[:-1] + ("weight",)
UpperCAmelCase_ : Optional[Any] = jnp.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_SCREAMING_SNAKE_CASE ) not in pt_model_dict:
# linear layer
UpperCAmelCase_ : List[str] = flax_key_tuple[:-1] + ("weight",)
UpperCAmelCase_ : Tuple = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase_ : Optional[int] = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCAmelCase_ : Dict = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCAmelCase_ : Tuple = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCAmelCase_ : Dict = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCAmelCase_ : Union[str, Any] = ".".join(_SCREAMING_SNAKE_CASE )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCAmelCase_ : Dict = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCAmelCase_ : List[Any] = key.split("." )
UpperCAmelCase_ : int = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCAmelCase_ : str = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCAmelCase_ : Dict = key_components[-2] + "_v"
if name is not None:
UpperCAmelCase_ : Dict = key_components[:-3] + [name]
UpperCAmelCase_ : Union[str, Any] = ".".join(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = key
if flax_key in special_pt_names:
UpperCAmelCase_ : List[str] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
UpperCAmelCase_ : List[str] = np.asarray(_SCREAMING_SNAKE_CASE ) if not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
UpperCAmelCase_ : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# remove from missing keys
missing_keys.remove(_SCREAMING_SNAKE_CASE )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_SCREAMING_SNAKE_CASE )
pt_model.load_state_dict(_SCREAMING_SNAKE_CASE )
# re-transform missing_keys to list
UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(_SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
" use it for predictions and inference." )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
"If your task is similar to the task the model of the checkpoint was trained on, "
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
| 71 |
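The renaming logic above also reshapes weights to match Flax conventions: PyTorch convolution kernels are stored as (out, in, h, w) while Flax expects (h, w, in, out), and a PyTorch linear weight is the transpose of the Flax kernel. A short demonstration of both axis moves:

import numpy as np

# PyTorch conv weight (out_channels, in_channels, kh, kw) -> Flax (kh, kw, in, out)
pt_conv = np.zeros((8, 3, 5, 5))
assert pt_conv.transpose(2, 3, 1, 0).shape == (5, 5, 3, 8)

# PyTorch linear weight (out_features, in_features) -> Flax kernel is its transpose
pt_linear = np.zeros((16, 32))
assert pt_linear.T.shape == (32, 16)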
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Any =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
class _snake_case (metaclass=__SCREAMING_SNAKE_CASE):
__A : Dict =["speech"]
def __init__( self ,*_snake_case ,**_snake_case ):
requires_backends(self ,["speech"] )
| 71 | 1 |
'''simple docstring'''
from __future__ import annotations
import requests
_lowerCamelCase = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int = 1 , _SCREAMING_SNAKE_CASE : str = "new" , _SCREAMING_SNAKE_CASE : list | None = None ) -> dict:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(_SCREAMING_SNAKE_CASE ) - valid_terms ) ):
UpperCAmelCase_ : List[str] = F'''Invalid search term: {invalid_search_terms}'''
raise ValueError(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = requests.get(
F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={"User-agent": "A random string"} , )
if response.status_code == 4_29:
raise requests.HTTPError
UpperCAmelCase_ : int = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(_SCREAMING_SNAKE_CASE )}
UpperCAmelCase_ : List[Any] = {}
for id_ in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = {
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get HTTP error 429, that means you are being rate limited. Try again after some time.
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 71 |
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : list[int] ) -> tuple[float, float]:
"""simple docstring"""
if not len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = equationa
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = equationa
# Calculate the determinants of the matrices
UpperCAmelCase_ : Optional[int] = aa * ba - aa * ba
UpperCAmelCase_ : Optional[int] = ca * ba - ca * ba
UpperCAmelCase_ : Any = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
UpperCAmelCase_ : Optional[int] = determinant_x / determinant
UpperCAmelCase_ : List[Any] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 71 | 1 |
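The rewriting in this row is lossy: both parameters end up named `_SCREAMING_SNAKE_CASE` and the zero-coefficient guard compares `equationa` against itself, so the snippet cannot run as printed. A readable reconstruction of the intended 2x2 Cramer's-rule solver (variable names are my own):

from __future__ import annotations

def cramers_rule_2x2(eq1: list[int], eq2: list[int]) -> tuple[float, float]:
    """Solve a1*x + b1*y = c1 and a2*x + b2*y = c2 by Cramer's rule."""
    if not (len(eq1) == len(eq2) == 3):
        raise ValueError("Please enter a valid equation.")
    if eq1[0] == eq1[1] == eq2[0] == eq2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    a1, b1, c1 = eq1
    a2, b2, c2 = eq2
    determinant = a1 * b2 - a2 * b1      # coefficient matrix
    determinant_x = c1 * b2 - c2 * b1    # x-column replaced by constants
    determinant_y = a1 * c2 - a2 * c1    # y-column replaced by constants
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    return (determinant_x / determinant, determinant_y / determinant)

print(cramers_rule_2x2([2, 3, 7], [5, 1, 11]))  # (2.0, 1.0)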
'''simple docstring'''
def a__ ( _SCREAMING_SNAKE_CASE : int | float | str ) -> tuple[int, int]:
"""simple docstring"""
try:
UpperCAmelCase_ : int = float(_SCREAMING_SNAKE_CASE )
except ValueError:
raise ValueError("Please enter a valid number" )
UpperCAmelCase_ : List[str] = decimal - int(_SCREAMING_SNAKE_CASE )
if fractional_part == 0:
return int(_SCREAMING_SNAKE_CASE ), 1
else:
UpperCAmelCase_ : Any = len(str(_SCREAMING_SNAKE_CASE ).split("." )[1] )
UpperCAmelCase_ : Any = int(decimal * (10**number_of_frac_digits) )
UpperCAmelCase_ : Any = 10**number_of_frac_digits
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = denominator, numerator
while True:
UpperCAmelCase_ : List[str] = dividend % divisor
if remainder == 0:
break
UpperCAmelCase_ , UpperCAmelCase_ : int = divisor, remainder
UpperCAmelCase_ , UpperCAmelCase_ : Dict = numerator / divisor, denominator / divisor
return int(_SCREAMING_SNAKE_CASE ), int(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction('67') = }""")
print(f"""{decimal_to_fraction('45.0') = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction('6.25') = }""")
print(f"""{decimal_to_fraction('78td') = }""")
| 71 |
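The while-loop above is Euclid's GCD algorithm written out by hand. A compact equivalent sketch using `math.gcd`; the function name and the final reduction step are the only liberties taken:

from __future__ import annotations

from math import gcd

def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        value = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number") from None
    if value == int(value):
        return int(value), 1
    n_digits = len(str(value).split(".")[1])
    numerator = int(value * 10 ** n_digits)
    denominator = 10 ** n_digits
    common = gcd(numerator, denominator)   # Euclid's algorithm
    return numerator // common, denominator // common

print(decimal_to_fraction("6.25"))  # (25, 4)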
'''simple docstring'''
from statistics import mean, stdev
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Dict = min(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = max(_SCREAMING_SNAKE_CASE )
# normalize data
return [round((x - x_min) / (x_max - x_min) , _SCREAMING_SNAKE_CASE ) for x in data]
def a__ ( _SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int = 3 ) -> list:
"""simple docstring"""
UpperCAmelCase_ : Tuple = mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = stdev(_SCREAMING_SNAKE_CASE )
# standardize data
return [round((x - mu) / (sigma) , _SCREAMING_SNAKE_CASE ) for x in data]
| 71 | 1 |
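A cleaned-up sketch of the two rescalings above (min-max normalization into [0, 1] and z-score standardization); names are illustrative:

from statistics import mean, stdev

def normalize(data: list[float], ndigits: int = 3) -> list[float]:
    # Min-max scaling: maps the data into [0, 1].
    x_min, x_max = min(data), max(data)
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]

def standardize(data: list[float], ndigits: int = 3) -> list[float]:
    # Z-score: zero mean, unit (sample) standard deviation.
    mu, sigma = mean(data), stdev(data)
    return [round((x - mu) / sigma, ndigits) for x in data]

print(normalize([2.0, 4.0, 6.0]))    # [0.0, 0.5, 1.0]
print(standardize([2.0, 4.0, 6.0]))  # [-1.0, 0.0, 1.0]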
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 71 |
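The repeated try/except blocks are transformers' optional-dependency guard: each backend is probed once, and the matching symbols are only registered when the probe succeeds. A minimal sketch of the idea; the availability check here is a generic `find_spec` probe, not the real `transformers.utils` helper, and the real `_LazyModule` additionally defers the imports until first attribute access:

import importlib.util

class OptionalDependencyNotAvailable(Exception):
    pass

def is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None

_import_structure = {"configuration_fnet": ["FNetConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch-backed objects simply stay out of the public surface
else:
    _import_structure["modeling_fnet"] = ["FNetModel"]

print(_import_structure)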
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase = 16
_lowerCamelCase = 32
def a__ ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCAmelCase_ : Tuple = load_dataset("glue" , "mrpc" )
def tokenize_function(_SCREAMING_SNAKE_CASE : Union[str, Any] ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase_ : Any = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ : Union[str, Any] = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_SCREAMING_SNAKE_CASE : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ : int = 8
else:
UpperCAmelCase_ : Optional[Any] = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE , padding="longest" , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors="pt" , )
# Instantiate dataloaders.
UpperCAmelCase_ : Any = DataLoader(
tokenized_datasets["train"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets["validation"] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase = mocked_dataloaders # noqa: F811
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _SCREAMING_SNAKE_CASE ) == "1":
UpperCAmelCase_ : Tuple = 2
# Initialize accelerator
UpperCAmelCase_ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : str = config["lr"]
UpperCAmelCase_ : Union[str, Any] = int(config["num_epochs"] )
UpperCAmelCase_ : Tuple = int(config["seed"] )
UpperCAmelCase_ : Union[str, Any] = int(config["batch_size"] )
UpperCAmelCase_ : List[str] = evaluate.load("glue" , "mrpc" )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_SCREAMING_SNAKE_CASE )
def inner_training_loop(_SCREAMING_SNAKE_CASE : List[str] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ : Dict = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ : int = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate scheduler
UpperCAmelCase_ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase_ : str = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.loss
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _SCREAMING_SNAKE_CASE )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
UpperCAmelCase_ : Tuple = parser.parse_args()
UpperCAmelCase_ : int = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 71 | 1 |
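The load-bearing piece of this example is the `find_executable_batch_size` decorator: on a CUDA out-of-memory error it frees memory, halves the batch size, and re-invokes the wrapped function. A minimal usage sketch; the decorator and import are the real accelerate API, while the training body is a stand-in:

from accelerate import Accelerator
from accelerate.utils import find_executable_batch_size

accelerator = Accelerator()

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size: int):
    # Reset anything the previous (possibly OOM-killed) attempt left behind.
    accelerator.free_memory()
    print(f"attempting training with batch_size={batch_size}")
    ...  # build dataloaders/model from `batch_size` and run the loop here

train()  # called with no arguments: the decorator supplies batch_size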
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _snake_case (unittest.TestCase):
@property
def UpperCamelCase__ ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = ort.SessionOptions()
UpperCAmelCase_ : Any = False
return options
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
UpperCAmelCase_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
UpperCAmelCase_ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" )
# using the PNDM scheduler by default
UpperCAmelCase_ : int = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="onnx" ,safety_checker=_snake_case ,feature_extractor=_snake_case ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=_snake_case )
UpperCAmelCase_ : Optional[Any] = "A red cat sitting on a park bench"
UpperCAmelCase_ : str = np.random.RandomState(0 )
UpperCAmelCase_ : str = pipe(
prompt=_snake_case ,image=_snake_case ,mask_image=_snake_case ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=15 ,generator=_snake_case ,output_type="np" ,)
UpperCAmelCase_ : List[str] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 71 |
'''simple docstring'''
from __future__ import annotations
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> list[int]:
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Optional[int] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_SCREAMING_SNAKE_CASE )
if n > 1:
factors.append(_SCREAMING_SNAKE_CASE )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 | 1 |
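The same trial-division factorizer with readable names, plus a worked call; the logic is unchanged from the row above:

def prime_factors(n: int) -> list[int]:
    # Trial division: strip each factor i completely before moving on, so
    # every appended i is prime; stop once i * i > n.
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)  # whatever remains is itself a prime factor
    return factors

print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]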
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCamelCase = logging.getLogger(__name__)
_lowerCamelCase = """Hello world! cécé herlolip"""
_lowerCamelCase = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : int ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Dict = BertAbsConfig(
temp_dir="." , finetune_bert=_SCREAMING_SNAKE_CASE , large=_SCREAMING_SNAKE_CASE , share_emb=_SCREAMING_SNAKE_CASE , use_bert_emb=_SCREAMING_SNAKE_CASE , encoder="bert" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
UpperCAmelCase_ : Tuple = torch.load(_SCREAMING_SNAKE_CASE , lambda _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : storage )
UpperCAmelCase_ : int = AbsSummarizer(_SCREAMING_SNAKE_CASE , torch.device("cpu" ) , _SCREAMING_SNAKE_CASE )
original.eval()
UpperCAmelCase_ : Union[str, Any] = BertAbsSummarizer(_SCREAMING_SNAKE_CASE , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
UpperCAmelCase_ : Tuple = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(_SCREAMING_SNAKE_CASE )) )
UpperCAmelCase_ : Any = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
UpperCAmelCase_ : Optional[Any] = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(_SCREAMING_SNAKE_CASE )) )
UpperCAmelCase_ : str = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
UpperCAmelCase_ : Tuple = encoder_input_ids
UpperCAmelCase_ : Optional[int] = decoder_input_ids
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Tuple = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
UpperCAmelCase_ : List[str] = original(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase_ : Any = original.generator(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = new_model(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase_ : List[str] = new_model.generator(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : str = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : int = torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
_lowerCamelCase = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 71 |
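One idiom worth noting in the conversion script is loading a GPU-trained checkpoint onto CPU. The row's rewriting mangled the call, but the standard form passes `map_location` to `torch.load`; a short sketch with a hypothetical path:

import torch

# Map every storage to CPU regardless of the device it was saved from;
# for this purpose it is equivalent to map_location="cpu".
checkpoint = torch.load(
    "path/to/bertabs_checkpoint.pt",               # hypothetical path
    map_location=lambda storage, location: storage,
)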
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _snake_case (datasets.Metric):
def UpperCamelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ) ,id="references" ),
} ) ,)
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case = 1 ,_snake_case = 4 ,):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_snake_case ,hypotheses=_snake_case ,min_len=_snake_case ,max_len=_snake_case )
}
| 71 | 1 |
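As the feature spec and compute method show, this metric is a thin wrapper over NLTK. The same score can be obtained directly from `nltk.translate.gleu_score`, skipping `datasets` entirely; a minimal sketch with toy token lists:

from nltk.translate.gleu_score import corpus_gleu

hypotheses = [["he", "read", "the", "book"]]
# One list of reference token-lists per hypothesis.
list_of_references = [[["he", "was", "reading", "the", "book"]]]

score = corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4)
print(round(score, 2))  # min(n-gram precision, n-gram recall) over the corpus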
'''simple docstring'''
from typing import Any
class _snake_case :
def __init__( self ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = data
UpperCAmelCase_ : List[str] = None
class _snake_case :
def __init__( self ):
UpperCAmelCase_ : str = None
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.head
while temp is not None:
print(temp.data ,end=" " )
UpperCAmelCase_ : Optional[Any] = temp.next
print()
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : List[str] = Node(_snake_case )
UpperCAmelCase_ : Optional[Any] = self.head
UpperCAmelCase_ : List[str] = new_node
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
if node_data_a == node_data_a:
return
else:
UpperCAmelCase_ : Any = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCAmelCase_ : List[Any] = node_a.next
UpperCAmelCase_ : str = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCAmelCase_ : List[Any] = node_a.next
if node_a is None or node_a is None:
return
UpperCAmelCase_ , UpperCAmelCase_ : Any = node_a.data, node_a.data
if __name__ == "__main__":
_lowerCamelCase = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
| 71 |
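The swap above exchanges the nodes' `data` payloads rather than relinking pointers, which sidesteps the head-node and adjacent-node edge cases. A compact sketch with readable names and an iterator for inspection:

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

class LinkedList:
    def __init__(self):
        self.head = None

    def push(self, data):
        node = Node(data)         # prepend at the head
        node.next = self.head
        self.head = node

    def __iter__(self):
        node = self.head
        while node is not None:
            yield node.data
            node = node.next

    def _find(self, data):
        node = self.head
        while node is not None and node.data != data:
            node = node.next
        return node

    def swap_nodes(self, data_a, data_b):
        if data_a == data_b:
            return
        node_a, node_b = self._find(data_a), self._find(data_b)
        if node_a is None or node_b is None:
            return                # one of the values is absent: no-op
        node_a.data, node_b.data = node_b.data, node_a.data

ll = LinkedList()
for i in range(5, 0, -1):
    ll.push(i)
ll.swap_nodes(1, 4)
print(list(ll))  # [4, 2, 3, 1, 5]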
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCamelCase = logging.getLogger(__name__)
@dataclass
class _snake_case :
__A : str =field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__A : Optional[str] =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the encoder."})
__A : bool =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class _snake_case :
__A : str =field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
__A : Optional[str] =field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__A : Optional[int] =field(
default=10_24 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(
default=1_28 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__A : Optional[int] =field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__A : Optional[int] =field(default=-1 , metadata={"help": "# training examples. -1 means use all."})
__A : Optional[int] =field(default=-1 , metadata={"help": "# validation examples. -1 means use all."})
__A : Optional[int] =field(default=-1 , metadata={"help": "# test examples. -1 means use all."})
__A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Source language id for translation."})
__A : Optional[str] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Target language id for translation."})
__A : Optional[int] =field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "# num_beams to use for evaluation."})
__A : bool =field(
default=__SCREAMING_SNAKE_CASE , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def a__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F'''{split}_results.json''' ) )
def a__ ( ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = parser.parse_args_into_dataclasses()
check_output_dir(_SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase_ : List[Any] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_SCREAMING_SNAKE_CASE , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCAmelCase_ : Dict = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_SCREAMING_SNAKE_CASE )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCAmelCase_ : Dict = SeqaSeqDataset
# Get datasets
UpperCAmelCase_ : Tuple = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
UpperCAmelCase_ : Dict = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCAmelCase_ : int = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCAmelCase_ : Optional[Any] = (
build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None
)
UpperCAmelCase_ : List[str] = SeqaSeqTrainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : List[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
UpperCAmelCase_ : Any = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCAmelCase_ : int = train_result.metrics
UpperCAmelCase_ : Dict = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(metric_key_prefix="val" )
UpperCAmelCase_ : Optional[Any] = data_args.n_val
UpperCAmelCase_ : Union[str, Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.do_predict:
logger.info("*** Predict ***" )
UpperCAmelCase_ : List[Any] = trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="test" )
UpperCAmelCase_ : List[str] = test_output.metrics
UpperCAmelCase_ : int = data_args.n_test
if trainer.is_world_process_zero():
UpperCAmelCase_ : Optional[Any] = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.predict_with_generate:
UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = lmap(str.strip , _SCREAMING_SNAKE_CASE )
write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 71 | 1 |
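The argument plumbing at the top of `main()` is `HfArgumentParser`, which turns dataclass fields into CLI flags and can alternatively consume a JSON file via `parse_json_file`. A minimal sketch of that pattern; the dataclass here is a trimmed stand-in, not the script's full argument set:

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier"}
    )
    freeze_encoder: bool = field(
        default=False, metadata={"help": "Whether to freeze the encoder."}
    )

parser = HfArgumentParser(ModelArguments)
# e.g. `python run.py --model_name_or_path t5-small --freeze_encoder`
(model_args,) = parser.parse_args_into_dataclasses()
print(model_args.model_name_or_path)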
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def a__ ( _SCREAMING_SNAKE_CASE : int ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ : int = filter(lambda _SCREAMING_SNAKE_CASE : p.requires_grad , model.parameters() )
UpperCAmelCase_ : Union[str, Any] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_lowerCamelCase = logging.getLogger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ) -> str:
"""simple docstring"""
if metric == "rouge2":
UpperCAmelCase_ : int = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
UpperCAmelCase_ : Dict = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
UpperCAmelCase_ : List[Any] = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
" function." )
UpperCAmelCase_ : Tuple = ModelCheckpoint(
dirpath=_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , monitor=F'''val_{metric}''' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
return EarlyStopping(
monitor=F'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , )
class _snake_case (pl.Callback):
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Union[str, Any] = {f'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_snake_case )
@rank_zero_only
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case=True ):
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
UpperCAmelCase_ : Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
UpperCAmelCase_ : Union[str, Any] = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase_ : Optional[Any] = od / "test_results.txt"
UpperCAmelCase_ : List[str] = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase_ : List[Any] = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
UpperCAmelCase_ : List[str] = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_snake_case )
generations_file.parent.mkdir(exist_ok=_snake_case )
with open(_snake_case ,"a+" ) as writer:
for key in sorted(_snake_case ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase_ : Union[str, Any] = metrics[key]
if isinstance(_snake_case ,torch.Tensor ):
UpperCAmelCase_ : Optional[int] = val.item()
UpperCAmelCase_ : Dict = f'''{key}: {val:.6f}\n'''
writer.write(_snake_case )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase_ : Any = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(_snake_case )
@rank_zero_only
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
try:
UpperCAmelCase_ : List[Any] = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase_ : Any = pl_module.model.num_parameters()
UpperCAmelCase_ : Union[str, Any] = count_trainable_parameters(_snake_case )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} )
@rank_zero_only
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
save_json(pl_module.metrics ,pl_module.metrics_save_path )
return self._write_logs(_snake_case ,_snake_case ,"test" )
@rank_zero_only
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
save_json(pl_module.metrics ,pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 71 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class _snake_case :
__A : Dict =BlenderbotConfig
__A : Union[str, Any] ={}
__A : Any ="gelu"
def __init__( self ,_snake_case ,_snake_case=13 ,_snake_case=7 ,_snake_case=True ,_snake_case=False ,_snake_case=99 ,_snake_case=32 ,_snake_case=2 ,_snake_case=4 ,_snake_case=37 ,_snake_case=0.1 ,_snake_case=0.1 ,_snake_case=20 ,_snake_case=2 ,_snake_case=1 ,_snake_case=0 ,):
UpperCAmelCase_ : List[Any] = parent
UpperCAmelCase_ : str = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : str = eos_token_id
UpperCAmelCase_ : List[Any] = pad_token_id
UpperCAmelCase_ : List[Any] = bos_token_id
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
UpperCAmelCase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
UpperCAmelCase_ : Optional[Any] = tf.concat([input_ids, eos_tensor] ,axis=1 )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_snake_case ,_snake_case ,_snake_case )
return config, inputs_dict
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
UpperCAmelCase_ : Tuple = TFBlenderbotModel(config=_snake_case ).get_decoder()
UpperCAmelCase_ : int = inputs_dict["input_ids"]
UpperCAmelCase_ : Dict = input_ids[:1, :]
UpperCAmelCase_ : Any = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_ : int = inputs_dict["head_mask"]
UpperCAmelCase_ : Optional[int] = 1
# first forward pass
UpperCAmelCase_ : List[str] = model(_snake_case ,attention_mask=_snake_case ,head_mask=_snake_case ,use_cache=_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ : Optional[int] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase_ : Any = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
# append to next input_ids and
UpperCAmelCase_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
UpperCAmelCase_ : Any = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
UpperCAmelCase_ : Any = model(_snake_case ,attention_mask=_snake_case )[0]
UpperCAmelCase_ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ : str = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case ,_snake_case ,rtol=1E-3 )
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , ) -> Union[str, Any]:
"""simple docstring"""
if attention_mask is None:
UpperCAmelCase_ : Dict = tf.cast(tf.math.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase_ : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase_ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _snake_case (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : Union[str, Any] =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
__A : List[str] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
__A : Dict =(
{
"conversational": TFBlenderbotForConditionalGeneration,
"feature-extraction": TFBlenderbotModel,
"summarization": TFBlenderbotForConditionalGeneration,
"text2text-generation": TFBlenderbotForConditionalGeneration,
"translation": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A : Any =True
__A : Dict =False
__A : Dict =False
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = TFBlenderbotModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self ,config_class=_snake_case )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_snake_case )
@require_tokenizers
@require_tf
class _snake_case (unittest.TestCase):
__A : Optional[int] =["My friends are cool but they eat too many carbs."]
__A : Optional[Any] ="facebook/blenderbot-400M-distill"
@cached_property
def UpperCamelCase__ ( self ):
return BlenderbotTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = self.tokenizer(self.src_text ,return_tensors="tf" )
UpperCAmelCase_ : Union[str, Any] = self.model.generate(
model_inputs.input_ids ,)
UpperCAmelCase_ : str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=_snake_case )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 71 | 1 |