code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : List[Any] = True
if hasattr(lowerCAmelCase__ , """use_cache""" ):
__SCREAMING_SNAKE_CASE : Tuple = True
__SCREAMING_SNAKE_CASE : Tuple = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__SCREAMING_SNAKE_CASE : List[Any] = getattr(self.model_tester , """key_length""" , lowerCAmelCase__ )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = len(model(lowerCAmelCase__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ , saved_model=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCAmelCase__ , """saved_model""" , """1""" )
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.models.load_model(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ )
if self.is_encoder_decoder:
__SCREAMING_SNAKE_CASE : Dict = outputs["""encoder_hidden_states"""]
__SCREAMING_SNAKE_CASE : Optional[int] = outputs["""encoder_attentions"""]
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = outputs["""hidden_states"""]
__SCREAMING_SNAKE_CASE : Any = outputs["""attentions"""]
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE : int = True
__SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
__SCREAMING_SNAKE_CASE : List[Any] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
__SCREAMING_SNAKE_CASE : Optional[int] = getattr(self.model_tester , """key_length""" , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = getattr(self.model_tester , """key_length""" , lowerCAmelCase__ )
def check_decoder_attentions_output(lowerCAmelCase__ : List[str] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(lowerCAmelCase__ )
self.assertEqual(out_len % 2 , 0 )
__SCREAMING_SNAKE_CASE : List[str] = outputs.decoder_attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowerCAmelCase__ : Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Optional[int] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ )
self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
check_encoder_attentions_output(lowerCAmelCase__ )
if self.is_encoder_decoder:
__SCREAMING_SNAKE_CASE : Dict = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
check_decoder_attentions_output(lowerCAmelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
check_encoder_attentions_output(lowerCAmelCase__ )
# Check attention is always last and order is fine
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : Optional[int] = True
__SCREAMING_SNAKE_CASE : Optional[Any] = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCAmelCase__ ) )
self.assertEqual(model.config.output_hidden_states , lowerCAmelCase__ )
check_encoder_attentions_output(lowerCAmelCase__ )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) | 578 |
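The row above tests TFConvBert end to end. As a minimal, hedged inference sketch of the model under test, assuming TensorFlow and transformers are installed and the YituTech/conv-bert-base checkpoint is reachable:

import tensorflow as tf
from transformers import AutoTokenizer, TFConvBertModel

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")

inputs = tokenizer("ConvBERT mixes convolution and self-attention.", return_tensors="tf")
outputs = model(inputs)
# last_hidden_state has shape (batch_size, sequence_length, hidden_size=768)
print(outputs.last_hidden_state.shape)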
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main() | 578 | 1 |
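A hedged sketch of the subcommand pattern the accelerate snippet above relies on: a factory registers a parser on a subparsers object, binds a handler through set_defaults(func=...), and the top-level CLI dispatches via args.func(args). All names below are illustrative, not the accelerate CLI itself:

import argparse

def register_demo_command(subparsers):
    # Illustrative stand-in for a test_command_parser-style factory.
    parser = subparsers.add_parser("demo")
    parser.set_defaults(func=lambda args: print("running demo command"))
    return parser

cli = argparse.ArgumentParser("toy CLI")
register_demo_command(cli.add_subparsers(dest="command"))
args = cli.parse_args(["demo"])
args.func(args)  # dispatches to the handler bound by set_defaults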
import argparse
__A ="""docs/source/_static/js/custom.js"""
def lowerCamelCase_ ( lowerCamelCase__ ):
with open(lowerCamelCase__ , encoding="utf-8" , newline="\n" ) as f:
lowerCamelCase_ = f.readlines()
lowerCamelCase_ = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion =" ):
index += 1
lowerCamelCase_ = F'const stableVersion = \"v{version}\"\n'
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {" ):
index += 1
# We go until the end
while not lines[index].startswith("}" ):
index += 1
# We add the new version at the end
lines[index - 1] += F' \"v{version}\": \"v{version}\",\n'
with open(lowerCamelCase__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lowerCamelCase__ )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
__A =parser.parse_args()
update_custom_js(args.version)
| 701 |
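The updater above scans for anchor lines and splices a new entry before the closing brace. A hedged, self-contained sketch of that same scan-and-insert pattern on an in-memory list of lines (no file I/O involved):

lines = [
    'const stableVersion = "v4.0.0"\n',
    "const versionMapping = {\n",
    '    "v4.0.0": "v4.0.0",\n',
    "}\n",
]
version = "4.1.0"

index = 0
while not lines[index].startswith("const stableVersion ="):
    index += 1
lines[index] = f'const stableVersion = "v{version}"\n'

while not lines[index].startswith("const versionMapping = {"):
    index += 1
while not lines[index].startswith("}"):
    index += 1
# Append the new mapping entry to the line just before the closing brace.
lines[index - 1] += f'    "v{version}": "v{version}",\n'

print("".join(lines))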
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        text,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 313 | 0 |
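A hedged usage sketch for a CLIPSeg-guided inpainting pipeline like the one above. The custom_pipeline name and checkpoint pairing follow the usual diffusers community-pipeline convention and should be treated as assumptions; running this needs a GPU and network access:

import requests
from io import BytesIO
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
from diffusers import DiffusionPipeline

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
segmenter = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",  # assumed community-pipeline id
    segmentation_model=segmenter,
    segmentation_processor=processor,
)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(BytesIO(requests.get(url).content)).resize((512, 512))

# CLIPSeg turns `text` into a mask; the diffusion model fills it with `prompt`.
result = pipe(text="a cat", image=image, prompt="a golden retriever").images[0]
result.save("inpainted.png")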
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,) | 71 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
# vision encoder
if "img_encoder.pos_embed" in name:
_lowerCamelCase : Tuple = name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : List[str] = name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Dict = name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
_lowerCamelCase : Dict = name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
_lowerCamelCase : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : Tuple = name.replace('''attn''' , '''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Optional[int] = name.replace('''proj''' , '''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : List[Any] = name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
_lowerCamelCase : int = name.replace('''norm1''' , '''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace('''norm2''' , '''layer_norm2''' )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[Any] = name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Dict = name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' )
if "ln_1" in name:
_lowerCamelCase : Tuple = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
_lowerCamelCase : List[str] = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
_lowerCamelCase : List[str] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
_lowerCamelCase : str = name.replace('''c_proj''' , '''fc2''' )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace('''text_encoder''' , '''text_model''' )
if "ln_final" in name:
_lowerCamelCase : str = name.replace('''ln_final''' , '''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : str = name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' )
if "img_projector.linear_out." in name:
_lowerCamelCase : Union[str, Any] = name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : List[str] = name.replace('''text_projector.linear_hidden''' , '''text_projection''' )
if "text_projector.linear_out" in name:
_lowerCamelCase : Any = name.replace('''text_projector.linear_out''' , '''text_projection.3''' )
return name
def convert_state_dict(orig_state_dict, config):
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Tuple = key.split('''.''' )
_lowerCamelCase, _lowerCamelCase : Dict = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : Optional[int] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : List[Any] = val[dim : dim * 2, :]
_lowerCamelCase : Optional[Any] = val[-dim:, :]
else:
_lowerCamelCase : Union[str, Any] = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Any = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[int] = key.split('''.''' )
_lowerCamelCase : Optional[Any] = int(key_split[3] )
_lowerCamelCase : Union[str, Any] = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : Any = val[:dim, :]
_lowerCamelCase : Optional[Any] = val[
dim : dim * 2, :
]
_lowerCamelCase : Dict = val[-dim:, :]
else:
_lowerCamelCase : List[Any] = val[:dim]
_lowerCamelCase : Tuple = val[dim : dim * 2]
_lowerCamelCase : str = val[-dim:]
else:
_lowerCamelCase : Optional[Any] = rename_key(lowercase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : List[str] = val.squeeze_()
else:
_lowerCamelCase : Tuple = val
return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
_lowerCamelCase : Optional[Any] = GroupViTConfig()
_lowerCamelCase : Any = GroupViTModel(lowercase_ ).eval()
_lowerCamelCase : Optional[Any] = torch.load(lowercase_ , map_location='''cpu''' )['''model''']
_lowerCamelCase : List[str] = convert_state_dict(lowercase_ , lowercase_ )
_lowerCamelCase, _lowerCamelCase : List[str] = model.load_state_dict(lowercase_ , strict=lowercase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase_ ) == 0)
# verify result
_lowerCamelCase : Optional[Any] = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
_lowerCamelCase : Dict = prepare_img()
_lowerCamelCase : str = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=lowercase_ , padding=lowercase_ , return_tensors='''pt''' )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**lowercase_ )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : Dict = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Tuple = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , lowercase_ , atol=1e-3 )
processor.save_pretrained(lowercase_ )
model.save_pretrained(lowercase_ )
print('''Successfully saved processor and model to''' , lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowercase_ , organization='''nielsr''' )
model.push_to_hub(lowercase_ , organization='''nielsr''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 114 | 0 |
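One step worth isolating from convert_state_dict above is the fused-qkv split: a (3*dim, dim) projection matrix is sliced row-wise into query, key, and value weights. A minimal sketch with toy tensors (no GroupViT checkpoint involved):

import torch

dim = 4
fused_qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

query_weight = fused_qkv_weight[:dim, :]          # rows 0 .. dim-1
key_weight = fused_qkv_weight[dim : dim * 2, :]   # rows dim .. 2*dim-1
value_weight = fused_qkv_weight[-dim:, :]         # rows 2*dim .. 3*dim-1

assert query_weight.shape == key_weight.shape == value_weight.shape == (dim, dim)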
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 267 |
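For context, a reduced illustration of the lazy-import idea behind _LazyModule, using only PEP 562 module-level __getattr__. This is a hedged standalone sketch, not the transformers implementation, and it only takes effect when the file is imported as a module:

import importlib

# Maps a module name to the attributes it provides; resolved on first access.
_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):  # PEP 562: called for attributes missing at module level
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

The payoff of this design is that heavy backends (torch, TensorFlow, Flax) are only imported when a symbol that needs them is actually touched.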
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError() | 267 | 1 |
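A hedged sketch of how such an abstract CLI command is typically subclassed; BaseCommand and EnvCommand are illustrative names, not transformers classes:

from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

class EnvCommand(BaseCommand):
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("env", help="print environment info")
        parser.set_defaults(func=lambda args: EnvCommand().run())

    def run(self):
        print("collecting environment info...")

cli = ArgumentParser("demo")
EnvCommand.register_subcommand(cli.add_subparsers())
args = cli.parse_args(["env"])
args.func(args)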
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 396 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table():
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(lowerCAmelCase )
__UpperCAmelCase = collections.defaultdict(lowerCAmelCase )
__UpperCAmelCase = collections.defaultdict(lowerCAmelCase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(lowerCAmelCase ):
__UpperCAmelCase = None
if _re_tf_models.match(lowerCAmelCase ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(lowerCAmelCase ).groups()[0]
elif _re_flax_models.match(lowerCAmelCase ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(lowerCAmelCase ).groups()[0]
elif _re_pt_models.match(lowerCAmelCase ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(lowerCAmelCase ).groups()[0]
if lookup_dict is not None:
while len(lowerCAmelCase ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''.join(camel_case_split(lowerCAmelCase )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(lowerCAmelCase )
all_models.sort()
__UpperCAmelCase = {'model_type': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = 'AutoTokenizer'
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(lowerCAmelCase )
def update_pipeline_and_auto_class_table(table):
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
# The type of pipeline may not exist in this framework
if not hasattr(lowerCAmelCase , lowerCAmelCase ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(lowerCAmelCase , lowerCAmelCase ).values():
if isinstance(lowerCAmelCase , lowerCAmelCase ):
model_names.append(lowerCAmelCase )
else:
model_names.extend(list(lowerCAmelCase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def update_metadata(token, commit_sha):
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(lowerCAmelCase )
__UpperCAmelCase = hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=lowerCAmelCase )
__UpperCAmelCase = Dataset.from_json(lowerCAmelCase )
__UpperCAmelCase = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(lowerCAmelCase ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(lowerCAmelCase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowerCAmelCase , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(lowerCAmelCase , 'pipeline_tags.json' ) )
if commit_sha is not None:
__UpperCAmelCase = (
F"""Update with commit {commit_sha}\n\nSee: """
F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=lowerCAmelCase , repo_type='dataset' , token=lowerCAmelCase , commit_message=lowerCAmelCase , )
def check_pipeline_tags():
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['pt']
if isinstance(lowerCAmelCase , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
__UpperCAmelCase = ', '.join(lowerCAmelCase )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
F"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 396 | 1 |
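The camel_case_split helper above is easy to sanity-check in isolation; the regex splits on lower-to-upper transitions and on acronym boundaries:

import re

def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]

print(camel_case_split("TFConvBertForMaskedLM"))
# ['TF', 'Conv', 'Bert', 'For', 'Masked', 'LM']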
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # References to the next nodes, one per level of the skip list.
        self.forward: list[Node[KT, VT]] = []
def __repr__( self ) -> str:
return F"Node({self.key}: {self.value})"
@property
    def level(self) -> int:
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key, value):
        node, update_vector = self._locate_node(key)

        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
def A__ ( ) -> Union[str, Any]:
lowerCamelCase : str =SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 1_2 )
skip_list.insert('''Key3''' , 4_1 )
skip_list.insert('''Key4''' , -1_9 )
lowerCamelCase : Tuple =skip_list.head
lowerCamelCase : Dict ={}
while node.level != 0:
lowerCamelCase : Tuple =node.forward[0]
lowerCamelCase : Union[str, Any] =node.value
assert len(SCREAMING_SNAKE_CASE_ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def A__ ( ) -> Tuple:
lowerCamelCase : Dict =SkipList()
skip_list.insert('''Key1''' , 1_0 )
skip_list.insert('''Key1''' , 1_2 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 1_0 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 1_0 )
node = skip_list.head
all_values = {}
while node.level != 0:
node = node.forward[0]
all_values[node.key] = node.value
assert len(all_values) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def test_searching_empty_list_returns_none() -> None:
skip_list = SkipList()
assert skip_list.find('''Some key''' ) is None
def test_search() -> None:
skip_list = SkipList()
skip_list.insert('''Key2''' , 2_0 )
assert skip_list.find('''Key2''' ) == 2_0
skip_list.insert('''Some Key''' , 1_0 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 1_3 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 1_0
assert skip_list.find('''V''' ) == 1_3
def test_deleting_item_from_empty_list_do_nothing() -> None:
skip_list = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method() -> None:
skip_list = SkipList()
skip_list.insert('''Key1''' , 1_2 )
skip_list.insert('''V''' , 1_3 )
skip_list.insert('''X''' , 1_4 )
skip_list.insert('''Key2''' , 1_5 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def test_delete_removes_only_given_key() -> None:
skip_list = SkipList()
skip_list.insert('''Key1''' , 1_2 )
skip_list.insert('''V''' , 1_3 )
skip_list.insert('''X''' , 1_4 )
skip_list.insert('''Key2''' , 1_5 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 1_4
assert skip_list.find('''Key1''' ) == 1_2
assert skip_list.find('''Key2''' ) == 1_5
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 1_2
assert skip_list.find('''Key2''' ) == 1_5
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 1_5
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def test_delete_doesnt_leave_dead_nodes() -> None:
skip_list = SkipList()
skip_list.insert('''Key1''' , 1_2 )
skip_list.insert('''V''' , 1_3 )
skip_list.insert('''X''' , 1_4_2 )
skip_list.insert('''Key2''' , 1_5 )
skip_list.delete('''X''' )
def traverse_keys(node):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(forward_node)
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values() -> None:
def is_sorted(lst):
return all(next_item >= item for item, next_item in zip(lst , lst[1:]))
skip_list = SkipList()
for i in range(1_0):
skip_list.insert(i, i)
assert is_sorted(list(skip_list))
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(skip_list))
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(skip_list))
def pytests() -> None:
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main() -> None:
skip_list = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 262 |
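The row above is a probabilistic skip list: each node carries a tower of forward pointers, and search walks top-down, moving right while the next key is smaller. A minimal self-contained sketch of that search/insert core follows; the names (`MiniSkipList`, `p`, `max_level`) are illustrative assumptions, not the row's own API.

import random

class _Node:
    def __init__(self, key=None, value=None):
        self.key = key
        self.value = value
        self.forward = []          # forward[i] = next node on level i

class MiniSkipList:
    def __init__(self, p=0.5, max_level=16):
        self.p, self.max_level = p, max_level
        self.head = _Node()
        self.head.forward = [None] * max_level

    def _random_level(self):
        level = 1                  # taller towers are exponentially rarer
        while random.random() < self.p and level < self.max_level:
            level += 1
        return level

    def _path_to(self, key):
        # For each level, remember the rightmost node whose key is < key.
        update, node = [None] * self.max_level, self.head
        for i in reversed(range(self.max_level)):
            while node.forward[i] is not None and node.forward[i].key < key:
                node = node.forward[i]
            update[i] = node
        return update

    def insert(self, key, value):
        update = self._path_to(key)
        new = _Node(key, value)
        new.forward = [None] * self._random_level()
        for i in range(len(new.forward)):
            new.forward[i] = update[i].forward[i]
            update[i].forward[i] = new

    def find(self, key):
        node = self._path_to(key)[0].forward[0]
        return node.value if node is not None and node.key == key else None

sl = MiniSkipList()
sl.insert(3, "three"); sl.insert(1, "one")
print(sl.find(3), sl.find(7))      # three None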
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 262 | 1 |
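The row above follows the `_LazyModule` pattern: the package `__init__` declares an `_import_structure` map and defers the heavy imports until an attribute is first touched. A hedged sketch of how such a lazy module can be built; `LazyModule` and its internals are invented for illustration, not the library's implementation.

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map exported symbol -> submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }
        self.__all__ = list(self._symbol_to_module)

    def __getattr__(self, name):
        if name not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[name]}")
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so __getattr__ runs once per symbol
        return value

# In a package __init__.py one would finish with, e.g.:
# sys.modules[__name__] = LazyModule(__name__, _import_structure)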
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
_SCREAMING_SNAKE_CASE = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs(word):
"""Return the set of adjacent symbol pairs in a word."""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = set(pairs)
return pairs
class PhobertTokenizer(PreTrainedTokenizer):
_SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
super().__init__(
bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
self.vocab_file = vocab_file
self.merges_file = merges_file
self.encoder = {}
self.encoder[self.bos_token] = 0
self.encoder[self.pad_token] = 1
self.encoder[self.eos_token] = 2
self.encoder[self.unk_token] = 3
self.add_from_file(vocab_file)
self.decoder = {v: k for k, v in self.encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
merges = merges_handle.read().split("\n")[:-1]
merges = [tuple(merge.split()[:-1]) for merge in merges]
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {}
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = "@@ ".join(word)
word = word[:-4]
self.cache[token] = word
return word
def _tokenize(self, text):
split_tokens = []
words = re.findall(r"\S+\n?", text)
for token in words:
split_tokens.extend(list(self.bpe(token).split(" ")))
return split_tokens
def _convert_token_to_id(self, token):
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.decoder.get(index, self.unk_token)
def convert_tokens_to_string(self, tokens):
out_string = " ".join(tokens).replace("@@ ", "").strip()
return out_string
def save_vocabulary(self, save_directory, filename_prefix=None):
if not os.path.isdir(save_directory):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
out_merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
copyfile(self.merges_file, out_merge_file)
return out_vocab_file, out_merge_file
def add_from_file(self, f):
if isinstance(f, str):
try:
with open(f, "r", encoding="utf-8") as fd:
self.add_from_file(fd)
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
return
lines = f.readlines()
for lineTmp in lines:
line = lineTmp.strip()
idx = line.rfind(" ")
if idx == -1:
raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
word = line[:idx]
self.encoder[word] = len(self.encoder)
| 163 |
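The tokenizer above rests on byte-pair encoding: merges are ranked in `bpe_ranks`, and the lowest-ranked adjacent pair is merged repeatedly until none applies. A minimal sketch of that greedy loop with a toy merge table; `bpe_merge` and `ranks` are illustrative names, not the tokenizer's API.

def bpe_merge(word, bpe_ranks):
    """Greedily apply the lowest-ranked merge until none applies."""
    symbols = list(word)
    while len(symbols) > 1:
        pairs = set(zip(symbols, symbols[1:]))
        best = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if best not in bpe_ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

# Toy merge table: rank 0 merges first.
ranks = {("l", "o"): 0, ("lo", "w"): 1}
print(bpe_merge("low", ranks))  # ['low']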
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
def __init__(self, *args, **kwargs):
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead.", FutureWarning, )
super().__init__(*args, **kwargs)
| 163 | 1 |
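The shim above keeps a deprecated alias alive by subclassing the replacement and warning once at construction. A generic sketch of the same pattern, with made-up class names:

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept only for backward compatibility."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=384)  # warns, then behaves like NewProcessor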
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
hf_down_res_prefix = f'down_blocks.{i}.resnets.{j}.'
sd_down_res_prefix = f'input_blocks.{3*i + j + 1}.0.'
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
hf_down_atn_prefix = f'down_blocks.{i}.attentions.{j}.'
sd_down_atn_prefix = f'input_blocks.{3*i + j + 1}.1.'
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
hf_up_res_prefix = f'up_blocks.{i}.resnets.{j}.'
sd_up_res_prefix = f'output_blocks.{3*i + j}.0.'
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
hf_up_atn_prefix = f'up_blocks.{i}.attentions.{j}.'
sd_up_atn_prefix = f'output_blocks.{3*i + j}.1.'
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
hf_downsample_prefix = f'down_blocks.{i}.downsamplers.0.conv.'
sd_downsample_prefix = f'input_blocks.{3*(i+1)}.0.op.'
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
hf_upsample_prefix = f'up_blocks.{i}.upsamplers.0.'
sd_upsample_prefix = f'output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = '''mid_block.attentions.0.'''
sd_mid_atn_prefix = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
hf_mid_res_prefix = f'mid_block.resnets.{j}.'
sd_mid_res_prefix = f'middle_block.{2*j}.'
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
"""Map HF Diffusers UNet keys to stable-diffusion keys."""
mapping = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
mapping[hf_name] = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
v = v.replace(hf_part, sd_part)
mapping[k] = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
v = v.replace(hf_part, sd_part)
mapping[k] = v
new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
hf_down_prefix = f'encoder.down_blocks.{i}.resnets.{j}.'
sd_down_prefix = f'encoder.down.{i}.block.{j}.'
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
hf_downsample_prefix = f'down_blocks.{i}.downsamplers.0.'
sd_downsample_prefix = f'down.{i}.downsample.'
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
hf_upsample_prefix = f'up_blocks.{i}.upsamplers.0.'
sd_upsample_prefix = f'up.{3-i}.upsample.'
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
hf_up_prefix = f'decoder.up_blocks.{i}.resnets.{j}.'
sd_up_prefix = f'decoder.up.{3-i}.block.{j}.'
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
hf_mid_res_prefix = f'mid_block.resnets.{i}.'
sd_mid_res_prefix = f'mid.block_{i+1}.'
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
"""Convert HF linear weights to SD conv weights by adding two trailing dims."""
return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict(vae_state_dict):
"""simple docstring"""
mapping = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
v = v.replace(hf_part, sd_part)
mapping[k] = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
v = v.replace(hf_part, sd_part)
mapping[k] = v
new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
weights_to_convert = ['''q''', '''k''', '''v''', '''proj_out''']
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F"mid.attn_1.{weight_name}.weight" in k:
print(F"Reshaping {k} for SD format" )
new_state_dict[k] = reshape_weight_for_sd(v)
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'''q''': 0, '''k''': 1, '''v''': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
"""simple docstring"""
new_state_dict = {}
capture_qkv_weight = {}
capture_qkv_bias = {}
for k, v in text_enc_dict.items():
if (
k.endswith('''.self_attn.q_proj.weight''' )
or k.endswith('''.self_attn.k_proj.weight''' )
or k.endswith('''.self_attn.v_proj.weight''' )
):
k_pre = k[: -len('''.q_proj.weight''' )]
k_code = k[-len('''q_proj.weight''' )]
if k_pre not in capture_qkv_weight:
capture_qkv_weight[k_pre] = [None, None, None]
capture_qkv_weight[k_pre][code2idx[k_code]] = v
continue
if (
k.endswith('''.self_attn.q_proj.bias''' )
or k.endswith('''.self_attn.k_proj.bias''' )
or k.endswith('''.self_attn.v_proj.bias''' )
):
k_pre = k[: -len('''.q_proj.bias''' )]
k_code = k[-len('''q_proj.bias''' )]
if k_pre not in capture_qkv_bias:
capture_qkv_bias[k_pre] = [None, None, None]
capture_qkv_bias[k_pre][code2idx[k_code]] = v
continue
relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
new_state_dict[relabelled_key] = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
new_state_dict[relabelled_key + '''.in_proj_weight'''] = torch.cat(tensors)
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
new_state_dict[relabelled_key + '''.in_proj_bias'''] = torch.cat(tensors)
return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
"""v1 text encoders need no key changes."""
return text_enc_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
text_enc_path = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
unet_state_dict = load_file(unet_path, device='''cpu''')
else:
unet_path = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
unet_state_dict = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
vae_state_dict = load_file(vae_path, device='''cpu''')
else:
vae_path = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
vae_state_dict = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
text_enc_dict = load_file(text_enc_path, device='''cpu''')
else:
text_enc_path = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
text_enc_dict = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
unet_state_dict = convert_unet_state_dict(unet_state_dict)
unet_state_dict = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
vae_state_dict = convert_vae_state_dict(vae_state_dict)
vae_state_dict = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
is_v20_model = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_v20_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
text_enc_dict = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
text_enc_dict = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
text_enc_dict = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
state_dict = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
state_dict = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 707 |
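The converter above works by renaming state-dict keys with (stable-diffusion, HF) substring tables and then prepending a namespace prefix. A tiny sketch of that remapping step; the rule, key, and prefix below are invented for illustration, and the direction of the rename is arbitrary here.

import torch

rename_rules = [("time_embed.0.", "time_embedding.linear_1.")]  # (old, new) fragments

def remap_state_dict(state_dict, rules, prefix=""):
    out = {}
    for key, tensor in state_dict.items():
        for old, new in rules:
            key = key.replace(old, new)
        out[prefix + key] = tensor
    return out

sd = {"time_embed.0.weight": torch.zeros(4, 4)}
print(list(remap_state_dict(sd, rename_rules, prefix="unet.")))
# ['unet.time_embedding.linear_1.weight']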
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ['''gpt2''']
TINY_MODEL_CHECKPOINT = '''gpt2'''
if is_tf_available():
class ModelToSave(tf.Module):
"""simple docstring"""
def __init__(self, tokenizer):
super().__init__()
self.tokenizer = tokenizer
config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
self.model = TFGPTaLMHeadModel.from_config(config)
@tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name='''text''' ),) )
def serving(self, text):
tokenized = self.tokenizer(text)
input_ids_dense = tokenized['''input_ids'''].to_tensor()
input_mask = tf.cast(input_ids_dense > 0, tf.int32)
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['''logits''']
return outputs
@require_tf
@require_keras_nlp
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def setUp(self):
super().setUp()
self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers) == len(self.tf_tokenizers)
self.test_sentences = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowercase = tokenizer([test_inputs] ,return_tensors='''tf''' )
__lowercase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowercase = python_outputs[key].numpy()
__lowercase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowercase__ ,tf.intaa ) == tf_outputs_values ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = tf.function(lowercase__ )
for test_inputs in self.test_sentences:
__lowercase = tf.constant(lowercase__ )
__lowercase = compiled_tokenizer(lowercase__ )
__lowercase = tf_tokenizer(lowercase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = ModelToSave(tokenizer=lowercase__ )
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = model.serving(lowercase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowercase = Path(lowercase__ ) / '''saved.model'''
tf.saved_model.save(lowercase__ ,lowercase__ ,signatures={'''serving_default''': model.serving} )
__lowercase = tf.saved_model.load(lowercase__ )
__lowercase = loaded_model.signatures['''serving_default'''](lowercase__ )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = tf_tokenizer(lowercase__ ) # Build model with some sample inputs
__lowercase = tf_tokenizer.get_config()
__lowercase = TFGPTaTokenizer.from_config(lowercase__ )
__lowercase = model_from_config(lowercase__ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowercase = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
__lowercase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowercase = tf_tokenizer(lowercase__ ,max_length=lowercase__ )
__lowercase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 624 | 0 |
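The save/load test above hinges on wrapping a `tf.function` with an explicit `input_signature` and exporting it as a SavedModel signature. A minimal sketch with a trivial module; the class name and the /tmp path are illustrative.

import tensorflow as tf

class Doubler(tf.Module):
    @tf.function(input_signature=(tf.TensorSpec((None,), tf.float32, name="x"),))
    def serving(self, x):
        return {"output_0": 2.0 * x}

module = Doubler()
tf.saved_model.save(module, "/tmp/doubler", signatures={"serving_default": module.serving})
restored = tf.saved_model.load("/tmp/doubler")
out = restored.signatures["serving_default"](tf.constant([1.0, 2.0]))["output_0"]
print(out.numpy())  # [2. 4.]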
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
__SCREAMING_SNAKE_CASE : int = (DDIMParallelScheduler,)
__SCREAMING_SNAKE_CASE : Union[str, Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def get_scheduler_config(self, **kwargs):
'''simple docstring'''
config = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**kwargs)
return config
def full_loop(self, **kwargs):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**kwargs)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps, eta = 1_0, 0.0
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for t in scheduler.timesteps:
residual = model(sample, t)
sample = scheduler.step(residual, t, sample, eta).prev_sample
return sample
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config(steps_offset=1 )
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
for t in [1, 1_0, 4_9]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : Union[str, Any] = self.get_scheduler_config()
__a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
__a : List[str] = self.scheduler_classes[0]
__a : List[str] = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ )
__a , __a : Any = 1_0, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
__a : List[Any] = self.dummy_model()
__a : int = self.dummy_sample_deter
__a : List[Any] = self.dummy_sample_deter + 0.1
__a : List[str] = self.dummy_sample_deter - 0.1
__a : Optional[Any] = samplea.shape[0]
__a : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
__a : Union[str, Any] = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ )
__a : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__a : int = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE__ )
__a : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : List[str] = self.full_loop()
__a : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
__a : Optional[int] = self.full_loop(prediction_type='v_prediction' )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : Union[str, Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
__a : Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 )
__a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) )
__a : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
| 47 |
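The variance assertions above exercise the DDIM posterior variance sigma_t^2 = (1 - alpha_bar_prev) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_prev). A standalone sketch that mirrors that formula under the same linear beta schedule; this is a re-derivation for illustration, not the scheduler's own code.

import torch

def ddim_variance(t, prev_t, num_train_timesteps=1000, beta_start=1e-4, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_t = alphas_cumprod[t]
    alpha_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    return (1 - alpha_prev) / (1 - alpha_t) * (1 - alpha_t / alpha_prev)

print(float(ddim_variance(420, 400)))  # should be ~0.1477 with these defaults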
from ..utils import DummyObject, requires_backends
class _UpperCamelCase( metaclass=DummyObject ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=DummyObject ):
__SCREAMING_SNAKE_CASE : List[Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=DummyObject ):
__SCREAMING_SNAKE_CASE : Any = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=DummyObject ):
__SCREAMING_SNAKE_CASE : Dict = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=DummyObject ):
__SCREAMING_SNAKE_CASE : int = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class _UpperCamelCase( metaclass=DummyObject ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 47 | 1 |
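These placeholders exist so that touching a class without its optional backends fails lazily with a readable error instead of an import-time crash. A hedged sketch of the metaclass trick; the class and method names below are invented for illustration.

class DummyObject(type):
    # Any public attribute access on a class using this metaclass raises ImportError.
    def __getattr__(cls, key):
        if key.startswith("_"):
            raise AttributeError(key)
        raise ImportError(
            f"{cls.__name__} requires the following backends: {', '.join(cls._backends)}"
        )

class OnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

try:
    OnnxPipeline.from_pretrained("some/model")  # hypothetical call
except ImportError as err:
    print(err)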
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
v_star = v.conjugate().T
v_star_dot = v_star.dot(a)
assert isinstance(v_star_dot, np.ndarray)
return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]])
assert is_hermitian(a), f'''{a} is not hermitian.'''
print(rayleigh_quotient(a, v))
a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
assert is_hermitian(a), f'''{a} is not hermitian.'''
assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 151 |
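For a Hermitian matrix the Rayleigh quotient v*Av / v*v always lies between the smallest and largest eigenvalue, which a quick NumPy check can confirm:

import numpy as np

a = np.array([[2.0, 1.0], [1.0, 3.0]])   # real symmetric, hence Hermitian
eigenvalues = np.linalg.eigvalsh(a)
v = np.array([[1.0], [1.0]])
rq = float((v.T @ a @ v) / (v.T @ v))
assert eigenvalues.min() - 1e-9 <= rq <= eigenvalues.max() + 1e-9
print(rq, eigenvalues)  # 3.5 lies between ~1.38 and ~3.62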
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file: str, eval_file: str, test_file: str, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int] = None):
a__ : Optional[Any] = {}
if train_file is not None:
a__ : str = [train_file]
if eval_file is not None:
a__ : Dict = [eval_file]
if test_file is not None:
a__ : Tuple = [test_file]
a__ : int = datasets.load_dataset("csv" , data_files=__a )
a__ : List[str] = list(ds[list(files.keys() )[0]].features.keys() )
a__ : Any = features_name.pop(__a )
a__ : Union[str, Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
a__ : Any = {label: i for i, label in enumerate(__a )}
a__ : Union[str, Any] = tokenizer.model_input_names
a__ : Optional[int] = {}
if len(__a ) == 1:
for k in files.keys():
a__ : Any = ds[k].map(
lambda __a : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__a , max_length=__a , padding="max_length" ) , batched=__a , )
elif len(__a ) == 2:
for k in files.keys():
a__ : Any = ds[k].map(
lambda __a : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding="max_length" , ) , batched=__a , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
a__ : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names}
a__ : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
a__ : Tuple = {k: v for k, v in ex.items() if k in input_names}
a__ : Tuple = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
a__ : List[Any] = {k: v for k, v in ex.items() if k in input_names}
a__ : List[Any] = labelaid[ex[label_name]]
yield (d, label)
a__ : Tuple = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
a__ : Any = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
a__ : str = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
a__ : Dict = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
a__ : Optional[int] = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
a__ : Any = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
UpperCamelCase : Tuple = logging.getLogger(__name__)
@dataclass
class A__ :
"""simple docstring"""
_lowercase = field(metadata={'help': 'Which column contains the label'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the training file'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the development file'} )
_lowercase = field(default=A__ , metadata={'help': 'The path of the test file'} )
_lowercase = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_lowercase = field(
default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class A__ :
"""simple docstring"""
_lowercase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_lowercase = field(
default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_lowercase = field(
default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_lowercase = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_lowercase = field(
default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
a__, a__, a__ : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a__ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
a__, a__, a__, a__ : Dict = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
a__ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
a__ : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , )
def compute_metrics(__a ) -> Dict:
a__ : List[str] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
a__ : Optional[int] = TFTrainer(
model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
a__ : Union[str, Any] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
a__ : str = trainer.evaluate()
a__ : List[str] = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(__a , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(__a )
return results
if __name__ == "__main__":
main()
| 151 | 1 |
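The script above feeds tokenized examples to TensorFlow through `Dataset.from_generator`. A minimal sketch of the same plumbing using the newer `output_signature` argument instead of the older dtype/shape pair; the token ids are made up.

import tensorflow as tf

def gen():
    yield ({"input_ids": [101, 7592, 102]}, 1)
    yield ({"input_ids": [101, 2088, 102]}, 0)

dataset = tf.data.Dataset.from_generator(
    gen,
    output_signature=(
        {"input_ids": tf.TensorSpec(shape=(None,), dtype=tf.int32)},
        tf.TensorSpec(shape=(), dtype=tf.int32),
    ),
)
for features, label in dataset:
    print(features["input_ids"].numpy(), int(label))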
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 |
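With the `ohms_law` signature restored above, usage solves for whichever quantity is passed as zero; a short sanity check:

print(ohms_law(voltage=10, current=0, resistance=5))   # {'current': 2.0}
print(ohms_law(voltage=0, current=1.5, resistance=2))  # {'voltage': 3.0}
print(ohms_law(voltage=6, current=3, resistance=0))    # {'resistance': 2.0}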
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_safs = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__a = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
if "://" in dataset_path:
dataset_path = dataset_path.split("""://""" )[1]
return dataset_path
def is_remote_filesystem(fs) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename(fs, src, dst):
is_local = not is_remote_filesystem(fs)
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
else:
fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
if hasattr(fsspec.asyn , """reset_lock""" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
| 374 | 0 |
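fsspec resolves a protocol string to a registered filesystem class, which is what the registration loop above relies on. A quick sketch against the built-in in-memory filesystem:

import fsspec

fs = fsspec.filesystem("memory")        # in-memory filesystem shipped with fsspec
with fs.open("/demo/data.txt", "w") as f:
    f.write("hello")
print(fs.cat("/demo/data.txt"))         # b'hello'
print(fs.protocol)                      # 'memory'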
from __future__ import annotations
def find_max(nums, left, right) -> int | float:
if len(nums) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(nums)
or left < -len(nums)
or right >= len(nums)
or right < -len(nums)
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
mid = (left + right) >> 1 # the middle
left_max = find_max(nums, left, mid) # find max in range[left, mid]
right_max = find_max(nums, mid + 1, right) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 718 |
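`find_max` splits the range in half and recurses, giving the recurrence T(n) = 2T(n/2) + O(1) = O(n) comparisons. Usage on the restored signature:

values = [3, -7, 10, 2, 10, -1]
print(find_max(values, 0, len(values) - 1))   # 10
print(find_max(values, 2, 2))                 # 10 (single-element range)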
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 592 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
__SCREAMING_SNAKE_CASE = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
"""simple docstring"""
for attribute in key.split("." ):
hf_pointer = getattr(hf_pointer, attribute)
if weight_type is not None:
hf_shape = getattr(hf_pointer, weight_type).shape
else:
hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
_a = value
elif weight_type == "weight_g":
_a = value
elif weight_type == "weight_v":
_a = value
elif weight_type == "bias":
_a = value
elif weight_type == "running_mean":
_a = value
elif weight_type == "running_var":
_a = value
elif weight_type == "num_batches_tracked":
_a = value
elif weight_type == "inv_freq":
_a = value
else:
_a = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
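# Usage sketch for set_recursively (hypothetical key, for illustration only):
#   set_recursively(hf_model, "wav2vec2_conformer.encoder.layers.3.self_attn.linear_q",
#                   value, "encoder.layers.3.self_attn.linear_q.weight", "weight")
# walks the dotted path with getattr and copies `value` into that module's .weight.data.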
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Map every tensor in the fairseq state dict onto the HF model, warning about leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq convolutional feature-extractor tensor into the HF feature extractor."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Load a fairseq Wav2Vec2-Conformer checkpoint and save it in HF format."""
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=1_6_0_0_0,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
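# Example invocation (script name and all paths are placeholders):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint_best.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-converted \
#       --dict_path /path/to/dict.ltr.txt
# Omit --dict_path and pass --not_finetuned to convert a pretraining-only checkpoint.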
| 388 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
__SCREAMING_SNAKE_CASE = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
return 12
@property
def SCREAMING_SNAKE_CASE_ ( self :int ):
return 12
@property
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
torch.manual_seed(0 )
_a = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self :Dict ):
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(UpperCamelCase__ )
@property
def SCREAMING_SNAKE_CASE_ ( self :str ):
torch.manual_seed(0 )
_a = 12
_a = 12
_a = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
_a = TransformeraDModel(**UpperCamelCase__ )
return model
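# With the dummy sizes above, sample_size=width fixes a 12 x 12 latent grid, so the
# transformer scores one of `num_embed` codebook entries for each of the 144 positions;
# a single attention head is used, with attention_head_dim set to height * width = 144.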
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
_a = "cpu"
_a = self.dummy_vqvae
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_transformer
_a = VQDiffusionScheduler(self.num_embed )
_a = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase__ )
_a = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
_a = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "teddy bear playing in the pool"
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" )
_a = output.images
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe(
[prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_a = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
_a = "cpu"
_a = self.dummy_vqvae
_a = self.dummy_text_encoder
_a = self.dummy_tokenizer
_a = self.dummy_transformer
_a = VQDiffusionScheduler(self.num_embed )
_a = LearnedClassifierFreeSamplingEmbeddings(
learnable=UpperCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
_a = VQDiffusionPipeline(
vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )
_a = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
_a = "teddy bear playing in the pool"
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" )
_a = output.images
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipe(
[prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
_a = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
_a = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
_a = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
_a = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
_a = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=UpperCamelCase__ , output_type="np" , )
_a = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 388 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str=1_3 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : int=2_4 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : List[str]=3_2 , SCREAMING_SNAKE_CASE_ : str=5 , SCREAMING_SNAKE_CASE_ : Any=4 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_7 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , ) -> Union[str, Any]:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = patch_size
lowercase_ = max_length
lowercase_ = num_mel_bins
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = frequency_stride
lowercase_ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase_ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
lowercase_ = (self.max_length - self.patch_size) // self.time_stride + 1
lowercase_ = frequency_out_dimension * time_out_dimension
lowercase_ = num_patches + 2
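# Worked example with the defaults above (patch_size=2, max_length=24,
# num_mel_bins=16, frequency_stride=2, time_stride=2):
#   frequency_out_dimension = (16 - 2) // 2 + 1 = 8
#   time_out_dimension      = (24 - 2) // 2 + 1 = 12
#   num_patches = 8 * 12 = 96, so the expected sequence length is 96 + 2 = 98.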
def _lowercase ( self : Optional[Any] ) -> List[Any]:
input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
labels = None
if self.use_labels:
    labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, input_values, labels
def _lowercase ( self : List[Any] ) -> Any:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> str:
lowercase_ = ASTModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[int] ) -> Dict:
config_and_inputs = self.prepare_config_and_inputs()
config, input_values, labels = config_and_inputs
inputs_dict = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a :Optional[int] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
a :List[Any] = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
a :Optional[Any] = False
a :str = False
a :Optional[int] = False
a :int = False
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Union[str, Any]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _lowercase ( self : Any ) -> Union[str, Any]:
lowercase_ = ASTModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def _lowercase ( self : List[str] ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def _lowercase ( self : Tuple ) -> str:
pass
def _lowercase ( self : List[str] ) -> Union[str, Any]:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def _lowercase ( self : int ) -> List[str]:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ['''input_values''']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase ( self : Union[str, Any] ) -> Dict:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = ASTModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def prepare_audio():
    """Download a sample flac file from the Hub and load it with torchaudio."""
    filepath = hf_hub_download(
        repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Tuple ) -> Any:
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def _lowercase ( self : List[str] ) -> Any:
lowercase_ = self.default_feature_extractor
lowercase_ = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.default_feature_extractor
lowercase_ , lowercase_ = prepare_audio()
lowercase_ = audio.squeeze().numpy()
lowercase_ = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
lowercase_ = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
lowercase_ = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 409 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig ):
"""simple docstring"""
model_type = 'falcon'
keys_to_ignore_at_inference = ['past_key_values']
def __init__( self : str , SCREAMING_SNAKE_CASE_ : List[str]=6_5_0_2_4 , SCREAMING_SNAKE_CASE_ : Tuple=4_5_4_4 , SCREAMING_SNAKE_CASE_ : List[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7_1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1e-5 , SCREAMING_SNAKE_CASE_ : Tuple=0.02 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : str=0.0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , SCREAMING_SNAKE_CASE_ : Tuple=1_1 , SCREAMING_SNAKE_CASE_ : int=1_1 , **SCREAMING_SNAKE_CASE_ : Any , ) -> Optional[Any]:
lowercase_ = vocab_size
# Backward compatibility with n_embed kwarg
lowercase_ = kwargs.pop('''n_embed''' , SCREAMING_SNAKE_CASE_ )
lowercase_ = hidden_size if n_embed is None else n_embed
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = layer_norm_epsilon
lowercase_ = initializer_range
lowercase_ = use_cache
lowercase_ = hidden_dropout
lowercase_ = attention_dropout
lowercase_ = bos_token_id
lowercase_ = eos_token_id
lowercase_ = num_attention_heads if num_kv_heads is None else num_kv_heads
lowercase_ = alibi
lowercase_ = new_decoder_architecture
lowercase_ = multi_query # Ignored when new_decoder_architecture is True
lowercase_ = parallel_attn
lowercase_ = bias
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def _lowercase ( self : Tuple ) -> Optional[int]:
return self.hidden_size // self.num_attention_heads
@property
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
return not self.alibi
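# A short usage sketch (attribute names as in the upstream FalconConfig, where the
# two properties above are called `head_dim` and `rotary`):
#   config = FalconConfig()   # defaults: hidden_size=4544, num_attention_heads=71
#   config.head_dim           # 4544 // 71 = 64
#   config.rotary             # True, since `alibi` defaults to False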
| 409 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase ={
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase =[
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
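# Usage sketch: with _LazyModule, importing this package stays cheap; the heavy
# torch/vision submodules are loaded only when a symbol is first accessed, e.g.
#   from transformers.models.vivit import VivitModel   # modeling_vivit is imported here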
| 333 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
def _UpperCAmelCase ( self : Tuple ):
UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__SCREAMING_SNAKE_CASE ,"width_multiplier" ) )
class MobileViTVaModelTester:
def __init__( self : List[Any] ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : Optional[Any]=1_3 ,__SCREAMING_SNAKE_CASE : Optional[int]=6_4 ,__SCREAMING_SNAKE_CASE : Dict=2 ,__SCREAMING_SNAKE_CASE : List[str]=3 ,__SCREAMING_SNAKE_CASE : int="swish" ,__SCREAMING_SNAKE_CASE : str=3 ,__SCREAMING_SNAKE_CASE : Optional[int]=3_2 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.02 ,__SCREAMING_SNAKE_CASE : Optional[int]=True ,__SCREAMING_SNAKE_CASE : Union[str, Any]=True ,__SCREAMING_SNAKE_CASE : str=1_0 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=None ,__SCREAMING_SNAKE_CASE : int=0.25 ,__SCREAMING_SNAKE_CASE : Tuple=0.0 ,__SCREAMING_SNAKE_CASE : Optional[int]=0.0 ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = make_divisible(5_1_2 * width_multiplier ,divisor=8 )
UpperCAmelCase = hidden_act
UpperCAmelCase = conv_kernel_size
UpperCAmelCase = output_stride
UpperCAmelCase = classifier_dropout_prob
UpperCAmelCase = use_labels
UpperCAmelCase = is_training
UpperCAmelCase = num_labels
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
UpperCAmelCase = width_multiplier
UpperCAmelCase = ffn_dropout
UpperCAmelCase = attn_dropout
def _UpperCAmelCase ( self : List[Any] ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
pixel_labels = None
if self.use_labels:
    labels = ids_tensor([self.batch_size], self.num_labels)
    pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCAmelCase ( self : Dict ):
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
def _UpperCAmelCase ( self : List[Any] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Any ,__SCREAMING_SNAKE_CASE : int ):
UpperCAmelCase = MobileViTVaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _UpperCAmelCase ( self : Optional[int] ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Tuple ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = MobileViTVaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self : List[Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Any ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Optional[int] ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = MobileViTVaForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _UpperCAmelCase ( self : List[str] ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels, pixel_labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
_UpperCAmelCase : Tuple = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCAmelCase : List[Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : int = False
_UpperCAmelCase : Any = False
def _UpperCAmelCase ( self : Tuple ):
UpperCAmelCase = MobileViTVaModelTester(self )
UpperCAmelCase = MobileViTVaConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,has_text_modality=__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def _UpperCAmelCase ( self : Optional[int] ):
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def _UpperCAmelCase ( self : Optional[int] ):
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def _UpperCAmelCase ( self : Any ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def _UpperCAmelCase ( self : Optional[int] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCAmelCase ( self : Any ):
pass
def _UpperCAmelCase ( self : str ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Any ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : List[str] ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : List[str] ):
UpperCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = 5
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) ,__SCREAMING_SNAKE_CASE )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase = 2
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : str ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self : Optional[int] ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = MobileViTVaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
@cached_property
def _UpperCAmelCase ( self : Any ):
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
__SCREAMING_SNAKE_CASE )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
UpperCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self : Optional[Any] ):
UpperCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase = outputs.logits
# verify the logits
UpperCAmelCase = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] ,device=__SCREAMING_SNAKE_CASE ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self : List[str] ):
UpperCAmelCase = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase = model.to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=__SCREAMING_SNAKE_CASE ,return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase = outputs.logits.detach().cpu()
UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE ,target_sizes=[(5_0, 6_0)] )
UpperCAmelCase = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape ,__SCREAMING_SNAKE_CASE )
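# `make_divisible` (imported above) rounds channel counts; a minimal sketch of the
# usual MobileNet-style rounding it follows (an assumption about the reference
# implementation, not a verbatim copy):
def _make_divisible_sketch(value: float, divisor: int = 8, min_value: int = None) -> int:
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # ensure rounding down does not remove more than 10% of the channels
    if new_value < 0.9 * value:
        new_value += divisor
    return int(new_value)
# e.g. with the tester's default width_multiplier=0.25: make_divisible(512 * 0.25) -> 128.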
| 333 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
def __init__( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any=1_3 , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : int=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Tuple=9_9 , UpperCamelCase_ : Optional[int]=3_2 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : List[str]=3_7 , UpperCamelCase_ : Dict="gelu" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Tuple=5_1_2 , UpperCamelCase_ : int=1_6 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : str=0.02 , UpperCamelCase_ : List[str]=3 , UpperCamelCase_ : str=4 , UpperCamelCase_ : List[Any]=None , ):
lowerCAmelCase : int = parent
lowerCAmelCase : List[str] = 1_3
lowerCAmelCase : str = 7
lowerCAmelCase : Dict = True
lowerCAmelCase : Dict = True
lowerCAmelCase : Optional[Any] = True
lowerCAmelCase : Tuple = True
lowerCAmelCase : Union[str, Any] = 9_9
lowerCAmelCase : Dict = 3_8_4
lowerCAmelCase : Optional[int] = 2
lowerCAmelCase : Optional[int] = 4
lowerCAmelCase : Tuple = 3_7
lowerCAmelCase : Union[str, Any] = '''gelu'''
lowerCAmelCase : Union[str, Any] = 0.1
lowerCAmelCase : Optional[int] = 0.1
lowerCAmelCase : Dict = 5_1_2
lowerCAmelCase : int = 1_6
lowerCAmelCase : List[Any] = 2
lowerCAmelCase : Dict = 0.02
lowerCAmelCase : Dict = 3
lowerCAmelCase : Dict = 4
lowerCAmelCase : Optional[Any] = 1_2_8
lowerCAmelCase : Optional[int] = 2
lowerCAmelCase : int = 9
lowerCAmelCase : Dict = 1
lowerCAmelCase : Any = None
def lowerCamelCase__ ( self : Any ):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
    input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
    token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
    sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
    token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
    choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = ConvBertConfig(
    vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Dict = TFConvBertModel(config=UpperCamelCase_ )
lowerCAmelCase : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCAmelCase : Dict = [input_ids, input_mask]
lowerCAmelCase : str = model(UpperCamelCase_ )
lowerCAmelCase : List[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] ):
lowerCAmelCase : Optional[int] = TFConvBertForMaskedLM(config=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase : Dict = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int ):
lowerCAmelCase : Any = self.num_labels
lowerCAmelCase : str = TFConvBertForSequenceClassification(config=UpperCamelCase_ )
lowerCAmelCase : int = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase : List[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : List[str] = self.num_choices
lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=UpperCamelCase_ )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) )
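# The expand_dims + tile calls above lift each (batch_size, seq_length) tensor to
# (batch_size, num_choices, seq_length), so every candidate choice is scored by the
# same shared encoder weights.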
lowerCAmelCase : Union[str, Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowerCAmelCase : Dict = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ):
lowerCAmelCase : Any = self.num_labels
lowerCAmelCase : Tuple = TFConvBertForTokenClassification(config=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase : List[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict ):
lowerCAmelCase : Optional[int] = TFConvBertForQuestionAnswering(config=UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowerCAmelCase : List[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Tuple ):
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__UpperCamelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCamelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : int = TFConvBertModelTester(self )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 )
def lowerCamelCase__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase_ )
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : int ):
lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : str = True
lowerCAmelCase : str = True
if hasattr(UpperCamelCase_ , '''use_cache''' ):
lowerCAmelCase : List[Any] = True
lowerCAmelCase : List[str] = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , '''key_length''' , UpperCamelCase_ )
for model_class in self.all_model_classes:
lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Dict = model_class(UpperCamelCase_ )
lowerCAmelCase : Dict = len(model(UpperCamelCase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ )
lowerCAmelCase : Tuple = os.path.join(UpperCamelCase_ , '''saved_model''' , '''1''' )
lowerCAmelCase : Union[str, Any] = tf.keras.models.load_model(UpperCamelCase_ )
lowerCAmelCase : str = model(UpperCamelCase_ )
if self.is_encoder_decoder:
lowerCAmelCase : List[str] = outputs['''encoder_hidden_states''']
lowerCAmelCase : List[Any] = outputs['''encoder_attentions''']
else:
lowerCAmelCase : Optional[int] = outputs['''hidden_states''']
lowerCAmelCase : List[str] = outputs['''attentions''']
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
lowerCAmelCase : Dict = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
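# ConvBERT replaces half of the self-attention heads with span-based dynamic
# convolution (head_ratio defaults to 2), hence num_attention_heads / 2 heads in the
# expected attention shape here and in the checks below.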
@slow
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase, lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[int] = True
lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
lowerCAmelCase : List[str] = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowerCAmelCase : Any = getattr(self.model_tester , '''key_length''' , UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = getattr(self.model_tester , '''key_length''' , UpperCamelCase_ )
def check_decoder_attentions_output(UpperCamelCase_ : Any ):
lowerCAmelCase : str = len(UpperCamelCase_ )
self.assertEqual(out_len % 2 , 0 )
lowerCAmelCase : List[str] = outputs.decoder_attentions
self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCamelCase_ : Any ):
lowerCAmelCase : Union[str, Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = True
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
lowerCAmelCase : List[Any] = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : Any = len(UpperCamelCase_ )
self.assertEqual(config.output_hidden_states , UpperCamelCase_ )
check_encoder_attentions_output(UpperCamelCase_ )
if self.is_encoder_decoder:
lowerCAmelCase : Any = model_class(UpperCamelCase_ )
lowerCAmelCase : Union[str, Any] = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase_ )
check_decoder_attentions_output(UpperCamelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase : Optional[int] = True
lowerCAmelCase : Optional[int] = model_class(UpperCamelCase_ )
lowerCAmelCase : List[Any] = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(config.output_hidden_states , UpperCamelCase_ )
check_encoder_attentions_output(UpperCamelCase_ )
# Check attention is always last and order is fine
lowerCAmelCase : Tuple = True
lowerCAmelCase : int = True
lowerCAmelCase : str = model_class(UpperCamelCase_ )
lowerCAmelCase : Any = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase_ ) )
self.assertEqual(model.config.output_hidden_states , UpperCamelCase_ )
check_encoder_attentions_output(UpperCamelCase_ )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
lowerCAmelCase : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase : str = model(UpperCamelCase_ )[0]
lowerCAmelCase : List[Any] = [1, 6, 7_6_8]
self.assertEqual(output.shape , UpperCamelCase_ )
lowerCAmelCase : int = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 )
| 637 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ):
lowerCAmelCase : Any = parent
lowerCAmelCase : Any = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : str = is_training
lowerCAmelCase : List[Any] = use_input_mask
lowerCAmelCase : Optional[int] = use_token_type_ids
lowerCAmelCase : Union[str, Any] = use_labels
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : Optional[Any] = max_position_embeddings
lowerCAmelCase : Optional[int] = type_vocab_size
lowerCAmelCase : Tuple = type_sequence_label_size
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : str = num_labels
lowerCAmelCase : Optional[int] = num_choices
lowerCAmelCase : Tuple = scope
def lowerCamelCase__ ( self : Optional[int] ):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
    input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
    token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
    sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
    token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
    choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Tuple ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase : List[Any] = LlamaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Any , ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Optional[int] = LlamaModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
lowerCAmelCase : Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , )
lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str , ):
lowerCAmelCase : Optional[Any] = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , ):
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : str = True
lowerCAmelCase : Tuple = LlamaForCausalLM(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
# first forward pass
lowerCAmelCase : Optional[Any] = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , )
lowerCAmelCase : Dict = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase : Dict = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
lowerCAmelCase : str = model(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0]
# select random slice
lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
def setUp(self):
    self.model_tester = LlamaModelTester(self)
    self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=3_7)
def test_config(self):
    self.config_tester.run_common_tests()
def test_model(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    for type in ["absolute", "relative_key", "relative_key_query"]:
        config_and_inputs[0].position_embedding_type = type
        self.model_tester.create_and_check_model(*config_and_inputs)
def test_llama_sequence_classification_model(self):
    config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.num_labels = 3
    input_ids = input_dict["input_ids"]
    attention_mask = input_ids.ne(1).to(torch_device)
    sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
    model = LlamaForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
    self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def test_llama_sequence_classification_model_for_single_label(self):
    config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.num_labels = 3
    config.problem_type = "single_label_classification"
    input_ids = input_dict["input_ids"]
    attention_mask = input_ids.ne(1).to(torch_device)
    sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
    model = LlamaForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
    self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def test_llama_sequence_classification_model_for_multi_label(self):
    config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.num_labels = 3
    config.problem_type = "multi_label_classification"
    input_ids = input_dict["input_ids"]
    attention_mask = input_ids.ne(1).to(torch_device)
    sequence_labels = ids_tensor(
        [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
    ).to(torch.float)
    model = LlamaForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
    self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
def test_save_load_fast_init_from_base(self):
    pass
@parameterized.expand([("linear",), ("dynamic",)])
def test_model_rope_scaling(self, scaling_type):
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()
    short_input = ids_tensor([1, 1_0], config.vocab_size)
    long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

    set_seed(4_2)  # Fixed seed at init time so the two models get the same random weights
    original_model = LlamaModel(config)
    original_model.to(torch_device)
    original_model.eval()
    original_short_output = original_model(short_input).last_hidden_state
    original_long_output = original_model(long_input).last_hidden_state

    set_seed(4_2)  # Fixed seed at init time so the two models get the same random weights
    config.rope_scaling = {"type": scaling_type, "factor": 10.0}
    scaled_model = LlamaModel(config)
    scaled_model.to(torch_device)
    scaled_model.eval()
    scaled_short_output = scaled_model(short_input).last_hidden_state
    scaled_long_output = scaled_model(long_input).last_hidden_state

    # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
    # maximum sequence length, so the outputs for the short input should match.
    if scaling_type == "dynamic":
        self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
    else:
        self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))

    # The output should be different for long inputs
    self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
@require_torch
class LlamaIntegrationTest( unittest.TestCase ):
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase : int = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowerCAmelCase : str = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Tuple = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowerCAmelCase : List[Any] = model(torch.tensor(UpperCamelCase_ ) )
# Expected mean on dim = -1
lowerCAmelCase : List[str] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase : Dict = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
    '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowerCAmelCase : Any = model(torch.tensor(UpperCamelCase_ ) )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCAmelCase : Any = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Model is currently gated''' )
@slow
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowerCAmelCase : int = '''Simply put, the theory of relativity states that '''
lowerCAmelCase : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' )
lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ )
# greedy generation outputs
lowerCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=6_4 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ )
lowerCAmelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
| 637 | 1 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
def __init__( self : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict=13 , lowerCAmelCase__ : str=7 , lowerCAmelCase__ : List[Any]=False , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : int=False , lowerCAmelCase__ : List[str]=19 , lowerCAmelCase__ : Optional[Any]=32 , lowerCAmelCase__ : List[str]=5 , lowerCAmelCase__ : List[Any]=4 , lowerCAmelCase__ : Optional[int]=37 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : int=5_12 , lowerCAmelCase__ : List[Any]=16 , lowerCAmelCase__ : Union[str, Any]=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Optional[Any]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : Any = seq_length
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask
SCREAMING_SNAKE_CASE : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE : List[str] = use_labels
SCREAMING_SNAKE_CASE : str = vocab_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : int = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = num_labels
SCREAMING_SNAKE_CASE : Optional[Any] = num_choices
SCREAMING_SNAKE_CASE : List[Any] = scope
def __lowercase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowerCAmelCase__ , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
return config
def __lowercase ( self : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = EsmForProteinFolding(config=lowerCAmelCase__ ).float()
model.to(lowerCAmelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def __lowercase ( self : str ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
def setUp(self):
    self.model_tester = EsmFoldModelTester(self)
    self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def test_config(self):
    self.config_tester.run_common_tests()
def test_model(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('''Does not support attention outputs''' )
def __lowercase ( self : Any ):
"""simple docstring"""
pass
@unittest.skip
def __lowercase ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowercase ( self : Any ):
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def __lowercase ( self : int ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def __lowercase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self : str ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def __lowercase ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold only has one output format.''' )
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def __lowercase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def __lowercase ( self : Any ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self : Any ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def __lowercase ( self : Any ):
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self : List[Any] ):
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
@slow
def test_inference_protein_folding(self):
    model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
    model.eval()
    input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
    position_outputs = model(input_ids)["positions"]
    expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float64)
    self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 527 |
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the digits of the number against the trailing digits of its square.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
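    # Illustrative spot checks: a number passes when its square ends in the
    # number's own digits (e.g. 76**2 == 5776), and fails otherwise (7**2 == 49).
    assert is_automorphic_number(76)
    assert not is_automorphic_number(7)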
| 527 | 1 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        # Index of the rightmost occurrence of char in the pattern, or -1.
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        # Rightmost mismatching position in the current text window, or -1 on a full match.
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
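# Note (behavioral quirk preserved from the original): reassigning the loop
# variable `i` inside a `for` loop does not skip iterations in Python, so the
# bad-character shift computed above is effectively discarded and every
# alignment is scanned. A `while` loop would be needed to actually apply it.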
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 711 |
def is_string_contain_unique_chars(input_str: str) -> bool:
    # Each character's code point maps to one bit of the bitmap.
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
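    # Illustrative spot checks: all-distinct characters pass, repeats fail.
    assert is_string_contain_unique_chars("abcde")
    assert not is_string_contain_unique_chars("hello")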
| 105 | 0 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
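# Note: the round-trip equality above relies on _to_yaml_list() dropping the
# deprecated dataset_name field, which is why it is reset to None before comparing.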
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info: SplitInfo):
    # dataset_name field is deprecated, and is therefore not part of the YAML dump
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 225 |
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 289 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components(self):
    torch.manual_seed(0)
    unet = UNetaDConditionModel(
        block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64,
    )
    scheduler = EulerDiscreteScheduler(
        beta_start=0.0_0085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading",
    )
    torch.manual_seed(0)
    vae = AutoencoderKL(
        block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
    )
    torch.manual_seed(0)
    text_encoder_config = CLIPTextConfig(
        bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=32,
    )
    text_encoder = CLIPTextModel(text_encoder_config)
    tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
    text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
    tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
    components = {
        "unet": unet,
        "scheduler": scheduler,
        "vae": vae,
        "text_encoder": text_encoder,
        "tokenizer": tokenizer,
        "text_encoder_2": text_encoder_2,
        "tokenizer_2": tokenizer_2,
        # "safety_checker": None,
        # "feature_extractor": None,
    }
    return components
def get_dummy_inputs(self, device, seed=0):
    image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
    image = image / 2 + 0.5
    if str(device).startswith("mps"):
        generator = torch.manual_seed(seed)
    else:
        generator = torch.Generator(device=device).manual_seed(seed)
    inputs = {
        "prompt": "A painting of a squirrel eating a burger",
        "image": image,
        "generator": generator,
        "num_inference_steps": 2,
        "guidance_scale": 5.0,
        "output_type": "numpy",
        "strength": 0.75,
    }
    return inputs
def test_stable_diffusion_xl_img2img_euler(self):
    device = "cpu"  # ensure determinism for the device-dependent torch.Generator
    components = self.get_dummy_components()
    sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
    sd_pipe = sd_pipe.to(device)
    sd_pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(device)
    image = sd_pipe(**inputs).images
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (1, 32, 32, 3)
    expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_attention_slicing_forward_pass(self):
    super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

def test_inference_batch_single_identical(self):
    super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def __A ( self : str ) -> str:
pass
def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
    components = self.get_dummy_components()
    sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
    sd_pipe = sd_pipe.to(torch_device)
    sd_pipe.set_progress_bar_config(disable=None)

    # forward without prompt embeds
    inputs = self.get_dummy_inputs(torch_device)
    negative_prompt = 3 * ["this is a negative prompt"]
    inputs["negative_prompt"] = negative_prompt
    inputs["prompt"] = 3 * [inputs["prompt"]]
    output = sd_pipe(**inputs)
    image_slice_1 = output.images[0, -3:, -3:, -1]

    # forward with prompt embeds
    inputs = self.get_dummy_inputs(torch_device)
    negative_prompt = 3 * ["this is a negative prompt"]
    prompt = 3 * [inputs.pop("prompt")]
    (
        prompt_embeds,
        negative_prompt_embeds,
        pooled_prompt_embeds,
        negative_pooled_prompt_embeds,
    ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
    output = sd_pipe(
        **inputs,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
    )
    image_slice_2 = output.images[0, -3:, -3:, -1]

    # make sure that it's equal
    assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImgaImgPipelineIntegrationTests(unittest.TestCase ):
"""simple docstring"""
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
    generator = torch.Generator(device=generator_device).manual_seed(seed)
    latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
    latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
    inputs = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def test_stable_diffusion_base(self):
    pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
    pipe.to(torch_device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_inputs(torch_device)
    image = pipe(**inputs).images
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert image.shape == (1, 512, 512, 3)
    expected_slice = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506])
    assert np.abs(image_slice - expected_slice).max() < 7e-3
| 356 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
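# PRED holds candidate summaries and TGT the reference summaries; the tests
# below exercise score aggregation and newline handling in calculate_rouge.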
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_rouge2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_rouge2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 356 | 1 |
import itertools
import math
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num):
yield num
num += 1
def solution(nth: int = 1_0001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F'{solution() = }')
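    # Spot checks (Project Euler #7): the 6th prime is 13 and the 10001st is 104743.
    assert solution(6) == 13
    assert solution() == 104743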
| 298 |
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
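# Assumed input format (based on how the file is parsed above): one edge per
# line as "node_a node_b distance", with the tour's start node given by the
# first character of the file (see generate_first_solution below).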
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        n_index = solution.index(n)
        for kn in solution[1:-1]:
            kn_index = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[n_index] = kn
            _tmp[kn_index] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Iteratively move to the best non-tabu neighbour, keeping a bounded tabu
    list of recent node exchanges; return the best tour found and its cost."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
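
# Tiny end-to-end run on a 3-node complete graph (the "node node distance"
# file layout is inferred from generate_neighbours, so treat it as an
# assumption; left commented so it does not interfere with the CLI above).
# A single iteration is used because this toy instance has only one 2-swap
# neighbour:
# with open("tabu_test.txt", "w") as f:
#     f.write("a b 20\na c 18\nb c 10\n")
# neighbours = generate_neighbours("tabu_test.txt")
# first_solution, first_distance = generate_first_solution("tabu_test.txt", neighbours)
# best_solution, best_cost = tabu_search(first_solution, first_distance, neighbours, 1, 3)
# print(best_solution, best_cost)  # -> ['a', 'c', 'b', 'a'] 48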
| 298 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
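

# Standalone check of the input-dict helper above (tiny, arbitrary config
# values chosen for illustration; runs only when TensorFlow is installed).
if is_tf_available():
    _cfg = MBartConfig(
        vocab_size=99, d_model=16, encoder_layers=2, decoder_layers=2,
        encoder_attention_heads=2, decoder_attention_heads=2,
        encoder_ffn_dim=32, decoder_ffn_dim=32, pad_token_id=1,
    )
    _ids = tf.constant([[5, 6, 7, 1]])
    _batch = prepare_mbart_inputs_dict(_cfg, _ids, _ids)
    assert _batch["attention_mask"].shape == _ids.shape  # one mask entry per token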
| 707 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2_048,
    "AI-Sweden/gpt-sw3-350m": 2_048,
    "AI-Sweden/gpt-sw3-1.6b": 2_048,
    "AI-Sweden/gpt-sw3-6.7b": 2_048,
    "AI-Sweden/gpt-sw3-20b": 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " if you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Preprocess text the same way as was done when training the tokenizer."""
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to remove the default clean-up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string; special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encode a text or batch of texts to token ids using the preprocessing and the raw SP tokenizer."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decode token ids to text using the raw SP tokenizer."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
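

# Standalone illustration of the normalization preprocess_text performs,
# re-creating the same control-character regex outside the class (this block
# is an added example, not part of the original module).
_non_printing = re.compile(
    f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
)
_sample = "hello\x00\u00adworld"  # NUL and soft hyphen are both stripped
assert unicodedata.normalize("NFC", _non_printing.sub("", _sample)) == "helloworld"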
| 690 | 0 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for the base model; this also helps speed up the tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
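

# Direct use of the tester outside the unittest runner (an added sketch,
# guarded because flax may be absent; parent is never touched by
# prepare_config_and_inputs, so None is fine here).
if is_flax_available():
    _tester = FlaxBertModelTester(None)
    _config, _input_ids, _token_type_ids, _attention_mask = _tester.prepare_config_and_inputs()
    assert _input_ids.shape == (_tester.batch_size, _tester.seq_length)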
| 614 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )
    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir) / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    #  To allow all pl args uncomment the following line
    #  parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir) / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir) / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
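

# Hedged sketch of how a task script is expected to wire these pieces together
# (MyTaskModule is a hypothetical subclass implementing get_dataloader; left
# commented because it needs a real checkpoint and dataset):
# parser = argparse.ArgumentParser()
# add_generic_args(parser, os.getcwd())
# BaseTransformer.add_model_specific_args(parser, os.getcwd())
# args = parser.parse_args()
# trainer = generic_train(MyTaskModule(args), args)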
| 614 | 1 |
"""simple docstring"""
from sklearn.metrics import f1_score
import datasets
lowerCamelCase__ = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
lowerCamelCase__ = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
lowerCamelCase__ = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 711 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
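

# Usage sketch (hedged: loading a real checkpoint needs network access, so it
# is left commented; "Salesforce/blip2-opt-2.7b" is one known BLIP-2 model id):
# processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
# inputs = processor(images=image, text="a photo of", return_tensors="pt")
# print(inputs.keys())  # pixel_values plus the tokenized text fields
| 549 | 0 |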
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Runs the same tests as above with the ftfy + spacy text pre-processing path."""

    pass
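

# Standalone reconstruction of the fixture above, outside the unittest
# harness (an added example): the merges drive "lower" -> ["low", "er</w>"].
import tempfile

_tmp = tempfile.mkdtemp()
_vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>",
          "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>"]
_vocab_file = os.path.join(_tmp, "vocab.json")
_merges_file = os.path.join(_tmp, "merges.txt")
with open(_vocab_file, "w") as fp:
    fp.write(json.dumps(dict(zip(_vocab, range(len(_vocab))))))
with open(_merges_file, "w") as fp:
    fp.write("\n".join(["#version: 0.2", "l o", "lo w", "e r</w>", ""]))
_tok = OpenAIGPTTokenizer(_vocab_file, _merges_file)
assert _tok.tokenize("lower") == ["low", "er</w>"]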
| 669 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        """Decode char, bpe and wordpiece logits, keeping the highest-confidence
        string per example across the three heads."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
def _snake_case ( self: int , a: Optional[int] , a: Optional[Any] ):
if format == DecodeType.CHARACTER:
__lowerCamelCase : Optional[Any] = self.char_decode
__lowerCamelCase : Union[str, Any] = 1
__lowerCamelCase : List[str] = '[s]'
elif format == DecodeType.BPE:
__lowerCamelCase : Dict = self.bpe_decode
__lowerCamelCase : List[str] = 2
__lowerCamelCase : Any = '#'
elif format == DecodeType.WORDPIECE:
__lowerCamelCase : List[str] = self.wp_decode
__lowerCamelCase : int = 102
__lowerCamelCase : Dict = '[SEP]'
else:
raise ValueError(F'Format {format} is not supported.' )
__lowerCamelCase , __lowerCamelCase : int = [], []
__lowerCamelCase : Tuple = pred_logits.size(0 )
__lowerCamelCase : List[Any] = pred_logits.size(1 )
__lowerCamelCase , __lowerCamelCase : Dict = pred_logits.topk(1 , dim=-1 , largest=a , sorted=a )
__lowerCamelCase : List[str] = preds_index.view(-1 , a )[:, 1:]
__lowerCamelCase : Dict = decoder(a )
__lowerCamelCase , __lowerCamelCase : Optional[Any] = torch.nn.functional.softmax(a , dim=2 ).max(dim=2 )
__lowerCamelCase : List[str] = preds_max_prob[:, 1:]
for index in range(a ):
__lowerCamelCase : str = preds_str[index].find(a )
__lowerCamelCase : Tuple = preds_str[index][:pred_eos]
__lowerCamelCase : Any = preds_index[index].cpu().tolist()
__lowerCamelCase : Any = pred_index.index(a ) if eos_token in pred_index else -1
__lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1]
__lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(a )
conf_scores.append(a )
return dec_strs, conf_scores
def _snake_case ( self: Tuple , a: Optional[int] ):
__lowerCamelCase : Dict = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(a )]
return decode_strs
def _snake_case ( self: Optional[int] , a: Tuple ):
return self.bpe_tokenizer.batch_decode(a )
def _snake_case ( self: Optional[int] , a: List[Any] ):
__lowerCamelCase : int = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(a )]
return decode_strs
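# Minimal usage sketch for the processor above (the checkpoint name is
# illustrative; MGP-STR ships on the Hub as "alibaba-damo/mgp-str-base"):
#
#   from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=image, return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   text = processor.batch_decode(outputs.logits)["generated_text"]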
| 669 | 1 |
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (1.0 means identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
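# Sanity checks against the standard Jaro-Winkler definition (values rounded):
# jaro_winkler("hello", "world") gives about 0.4667, and
# jaro_winkler("martha", "marhta") gives about 0.9611 (one transposition, prefix "mar").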
| 639 |
def binary_xor(a: int, b: int) -> str:
    """Return the bitwise XOR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
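# Example: binary_xor(25, 32) == "0b111001"   (0b011001 XOR 0b100000)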
| 639 | 1 |
def mf_knapsack(i: int, wt: list, val: list, j: int):
    """
    Memoized (top-down) 0/1 knapsack: maximum value attainable with the
    first i items and remaining capacity j.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print('optimal_value = ', optimal_solution)
    print('An optimal subset corresponding to the optimal value', optimal_subset)
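# Both formulations fill an O(n * w) table. For the data above the optimum is 8,
# achieved by items 3 and 4 (weights 2 + 3 <= 6, values 4 + 4), matching the asserts.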
| 64 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
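# The helpers below flatten Natural Questions annotations into fixed-length,
# strided token windows (MAX_LENGTH tokens with DOC_STRIDE overlap) and map each
# answer span / category into every window, producing jsonl training data for a
# BigBird QA model.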
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly drop ~60% of the samples with no answer
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
_UpperCamelCase = load_dataset('''natural_questions''')
_UpperCamelCase = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
_UpperCamelCase = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
_UpperCamelCase = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
_UpperCamelCase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
_UpperCamelCase = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
_UpperCamelCase = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
| 146 | 0 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Apply the affine transform that maps the points pt1 onto pt2 and warp img with it."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
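# For a pure rotation about the image centre (instead of an arbitrary affine map
# defined by three point pairs), cv2.getRotationMatrix2D is the usual tool.
# A minimal sketch reusing gray_img from above:
#
#   center = (img_cols / 2, img_rows / 2)
#   matrix = cv2.getRotationMatrix2D(center, angle=45, scale=1.0)
#   rotated = cv2.warpAffine(gray_img, matrix, (img_cols, img_rows))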
| 214 |
def average_absolute_deviation(nums: list[int]) -> float:
    """Return the average absolute deviation of the numbers in ``nums``."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
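# Example: average_absolute_deviation([1, 2, 3, 4]) == 1.0
# (mean is 2.5; deviations 1.5 + 0.5 + 0.5 + 1.5 = 4, divided by 4 elements)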
| 214 | 1 |
def get_set_bits_count(number: int) -> int:
    """Count the set bits (1s) in the binary representation of a positive integer."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(number).count("1")
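# Example: get_set_bits_count(25) == 3, since 25 == 0b11001.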
if __name__ == "__main__":
import doctest
doctest.testmod() | 436 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
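# The suite below exercises MarianTokenizer: vocab handling, saving/reloading,
# batch padding/truncation, and checkpoints with separate source/target vocabularies.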
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"],
            padding=True,
            truncation=True,
            return_tensors=FRAMEWORK,
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))
    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text) | 293 | 0 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
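# Example invocation (the script path is illustrative):
#   python check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict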
| 711 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime, in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composites that cannot be written as prime + 2*i*i."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums

    return []


def solution():
    """Return the solution to the problem"""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
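# Project Euler 46 ("Goldbach's other conjecture"): the smallest odd composite that
# cannot be written as a prime plus twice a square is 5777, so solution() returns 5777.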
| 28 | 0 |
"""simple docstring"""
def __A ( a_ :Tuple , a_ :Union[str, Any] , a_ :int=False) -> List[str]:
if isinstance(a_ , a_) and isinstance(a_ , a_):
__a : List[str] = len(set_a.intersection(a_))
if alternative_union:
__a : List[str] = len(a_) + len(a_)
else:
__a : int = len(set_a.union(a_))
return intersection / union
if isinstance(a_ , (list, tuple)) and isinstance(a_ , (list, tuple)):
__a : Union[str, Any] = [element for element in set_a if element in set_b]
if alternative_union:
__a : Union[str, Any] = len(a_) + len(a_)
return len(a_) / union
else:
__a : List[Any] = set_a + [element for element in set_b if element not in set_a]
return len(a_) / len(a_)
return len(a_) / len(a_)
return None
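# Example: jaccard_similarity({"a", "b", "c", "d", "e"}, {"c", "d", "e", "f", "h", "i"})
# gives |A intersect B| / |A union B| = 3 / 8 = 0.375.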
if __name__ == "__main__":
A = {'''a''', '''b''', '''c''', '''d''', '''e'''}
A = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b)) | 52 |
from ..utils import DummyObject, requires_backends
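# Placeholder classes used when JAX/Flax is not installed: instantiating any of
# them (or calling its classmethods) raises an informative error through
# requires_backends. In the real module each class carries the name of the
# corresponding Flax model/pipeline class.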
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : Any , *lowerCamelCase__ : Any , **lowerCamelCase__ : Any ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Union[str, Any] , *lowerCamelCase__ : str , **lowerCamelCase__ : List[str] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Any , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : List[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Union[str, Any] , *lowerCamelCase__ : str , **lowerCamelCase__ : Union[str, Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Optional[Any] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : List[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : Tuple , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Any ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : str , *lowerCamelCase__ : Any , **lowerCamelCase__ : Union[str, Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : str , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : List[str] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : List[str] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Optional[int] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : str ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : str , *lowerCamelCase__ : int , **lowerCamelCase__ : List[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : str , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Any ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Optional[int] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Optional[Any] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : Tuple , *lowerCamelCase__ : int , **lowerCamelCase__ : Dict ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Union[str, Any] , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : str , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : List[str] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Optional[int] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Any ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : int , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : str ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : List[Any] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[Any] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Dict , *lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Tuple , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : Dict , *lowerCamelCase__ : Any , **lowerCamelCase__ : Any ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Optional[int] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : List[Any] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Tuple ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : List[str] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : List[str] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : List[str] , *lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Any , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : str , *lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : List[Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : List[Any] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : List[str] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : int ):
requires_backends(cls , ['''flax'''] )
class __magic_name__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self : int , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : List[str] ):
requires_backends(self , ['''flax'''] )
@classmethod
def _A ( cls : Union[str, Any] , *lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[int] ):
requires_backends(cls , ['''flax'''] )
@classmethod
def _A ( cls : Dict , *lowerCamelCase__ : int , **lowerCamelCase__ : Any ):
requires_backends(cls , ['''flax'''] )
| 348 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
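# With this pattern, `from transformers.onnx import OnnxConfig` imports the heavy
# submodules only on first attribute access: _LazyModule resolves names through
# _import_structure at runtime instead of importing everything eagerly.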
| 7 |
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (1.0 means identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 7 | 1 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 246 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
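# Session-scoped pytest fixtures that materialize small test datasets in many
# on-disk formats (arrow, sqlite, csv, parquet, json/jsonl, xml, plus bz2/gzip/
# lz4/7z/tar/xz/zip/zstd archives) for the `datasets` test suite.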
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt'
lowerCamelCase = FILE_CONTENT
with open(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ )
return filename
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
    import bz2
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
lowerCamelCase = bytes(UpperCamelCase_ , 'utf-8' )
    with bz2.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[int] ) -> Dict:
"""simple docstring"""
import gzip
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
lowerCamelCase = bytes(UpperCamelCase_ , 'utf-8' )
with gzip.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
lowerCamelCase = bytes(UpperCamelCase_ , 'utf-8' )
        with lz4.frame.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(UpperCamelCase_ , 'w' ) as archive:
archive.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
import tarfile
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(UpperCamelCase_ , 'w' ) as f:
f.add(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
import lzma
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
lowerCamelCase = bytes(UpperCamelCase_ , 'utf-8' )
with lzma.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : str , UpperCamelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
import zipfile
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[str] ) -> int:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
lowerCamelCase = bytes(UpperCamelCase_ , 'utf-8' )
with zstd.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'file.xml'
lowerCamelCase = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def a_ ( ) -> List[Any]:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Any ) -> List[str]:
"""simple docstring"""
lowerCamelCase = datasets.Dataset.from_dict(UpperCamelCase_ )
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=UpperCamelCase_ )
return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(UpperCamelCase_ , 'w' , newline='' ) as f:
lowerCamelCase = csv.DictWriter(UpperCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(UpperCamelCase_ , 'w' , newline='' ) as f:
lowerCamelCase = csv.DictWriter(UpperCamelCase_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : str , UpperCamelCase_ : Any ) -> Optional[Any]:
"""simple docstring"""
    import bz2
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(UpperCamelCase_ , 'rb' ) as f:
lowerCamelCase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(UpperCamelCase_ , 'wb' ) as f:
f.write(UpperCamelCase_ )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(UpperCamelCase_ ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(UpperCamelCase_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(UpperCamelCase_ , 'w' ) as f:
f.write(UpperCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase_ ) ) )
f.write(UpperCamelCase_ , arcname=os.path.join('main_dir' , os.path.basename(UpperCamelCase_ ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( UpperCamelCase_ : Dict ) -> List[str]:
"""simple docstring"""
lowerCamelCase = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
lowerCamelCase = pa.schema(
{
'col_1': pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
} )
with open(UpperCamelCase_ , 'wb' ) as f:
lowerCamelCase = pq.ParquetWriter(UpperCamelCase_ , schema=UpperCamelCase_ )
lowerCamelCase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(UpperCamelCase_ ) )] for k in DATA[0]} , schema=UpperCamelCase_ )
writer.write_table(UpperCamelCase_ )
writer.close()
return path
@pytest.fixture(scope='session' )
def json_path( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    data = {'data': DATA}
    with open(path , 'w' ) as f:
        json.dump(data , f )
    return path


@pytest.fixture(scope='session' )
def json_dict_of_lists_path( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path , 'w' ) as f:
        json.dump(data , f )
    return path


@pytest.fixture(scope='session' )
def jsonl_path( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def jsonla_path( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def jsonl_312_path( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA_312:
            f.write(json.dumps(item ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def jsonl_str_path( tmp_path_factory ):
    """simple docstring"""
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA_STR:
            f.write(json.dumps(item ) + '\n' )
    return path
@pytest.fixture(scope='session' )
def text_gz_path( tmp_path_factory , text_path ):
    """simple docstring"""
    import gzip
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
    with open(text_path , 'rb' ) as orig_file:
        with gzip.open(path , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path


@pytest.fixture(scope='session' )
def jsonl_gz_path( tmp_path_factory , jsonl_path ):
    """simple docstring"""
    import gzip
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
    with open(jsonl_path , 'rb' ) as orig_file:
        with gzip.open(path , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
    return path


@pytest.fixture(scope='session' )
def zip_jsonl_path( jsonl_path , jsonla_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.write(jsonla_path , arcname=os.path.basename(jsonla_path ) )
    return path


@pytest.fixture(scope='session' )
def zip_nested_jsonl_path( zip_jsonl_path , jsonl_path , jsonla_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(zip_jsonl_path , arcname=os.path.join('nested' , os.path.basename(zip_jsonl_path ) ) )
    return path


@pytest.fixture(scope='session' )
def zip_jsonl_with_dir_path( jsonl_path , jsonla_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(jsonl_path , arcname=os.path.join('main_dir' , os.path.basename(jsonl_path ) ) )
        f.write(jsonla_path , arcname=os.path.join('main_dir' , os.path.basename(jsonla_path ) ) )
    return path


@pytest.fixture(scope='session' )
def tar_jsonl_path( jsonl_path , jsonla_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.add(jsonla_path , arcname=os.path.basename(jsonla_path ) )
    return path


@pytest.fixture(scope='session' )
def tar_nested_jsonl_path( tar_jsonl_path , jsonl_path , jsonla_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(tar_jsonl_path , arcname=os.path.join('nested' , os.path.basename(tar_jsonl_path ) ) )
    return path
@pytest.fixture(scope='session' )
def text_path( tmp_path_factory ):
    """simple docstring"""
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def texta_path( tmp_path_factory ):
    """simple docstring"""
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def text_path_with_unsupported_extension( tmp_path_factory ):
    """simple docstring"""
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def zip_text_path( text_path , texta_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.basename(text_path ) )
        f.write(texta_path , arcname=os.path.basename(texta_path ) )
    return path


@pytest.fixture(scope='session' )
def zip_text_with_dir_path( text_path , texta_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.join('main_dir' , os.path.basename(text_path ) ) )
        f.write(texta_path , arcname=os.path.join('main_dir' , os.path.basename(texta_path ) ) )
    return path


@pytest.fixture(scope='session' )
def zip_unsupported_ext_path( text_path , texta_path , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.basename('unsupported.ext' ) )
        f.write(texta_path , arcname=os.path.basename('unsupported_2.ext' ) )
    return path


@pytest.fixture(scope='session' )
def text_path_with_unicode_new_lines( tmp_path_factory ):
    """simple docstring"""
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(text )
    return path
@pytest.fixture(scope='session' )
def image_file( ) -> str:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def audio_file( ) -> str:
"""simple docstring"""
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def zip_image_path( image_file , tmp_path_factory ):
    """simple docstring"""
    path = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(image_file , arcname=os.path.basename(image_file ) )
        f.write(image_file , arcname=os.path.basename(image_file ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def data_dir_with_hidden_files( tmp_path_factory ):
    """simple docstring"""
    data_dir = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 1_0 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 1_0 )
return data_dir
| 246 | 1 |
g = 9.80665  # standard gravity (m/s^2)


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    '''simple docstring'''
    if fluid_density <= 0:
        raise ValueError('Impossible fluid density')
    if volume < 0:
        raise ValueError('Impossible Object volume')
    if gravity <= 0:
        raise ValueError('Impossible Gravity')
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
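    # Hedged usage sketch (added): illustrative values only, assuming the
    # de-obfuscated name `archimedes_principle` defined above.
    print(archimedes_principle(fluid_density=997, volume=0.5))  # ~4888.6 N for 0.5 m^3 of water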
| 288 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 288 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 122 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 122 | 1 |
from __future__ import annotations
def fractional_knapsack(value, weight, capacity):
    '''simple docstring'''
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value = 0
    fractions = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
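    # Hedged usage sketch (added): a small illustrative instance; the numbers are
    # assumptions, not from the source. Items worth [60, 100, 120] with weights
    # [10, 20, 30] and capacity 50 give 240.0, taking 2/3 of the third item.
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))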
| 76 |
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
    )
    parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
    parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
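# Hedged usage sketch (added): an illustrative invocation; the script name, the
# base model and every path below are placeholders, not values from the source.
# python convert_s3prl_checkpoint.py \
#     --base_model_name facebook/wav2vec2-base \
#     --config_path ./hf_config.json \
#     --checkpoint_path ./s3prl_downstream.ckpt \
#     --model_dump_path ./converted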
| 76 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F'''{solution() = }''')
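    # Hedged check (added): the Pythagorean triplet with perimeter 1000 is
    # (200, 375, 425), so the line above should print solution() = 31875000.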
| 447 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
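# Hedged usage sketch (added): illustrative only; requires the `vision` extras
# and downloads the BLIP checkpoint on first use. `photo.jpg` is a placeholder.
# from PIL import Image
# tool = ImageCaptioningTool()
# print(tool(Image.open("photo.jpg")))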
| 715 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 143 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
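# Hedged usage sketch (added): default construction with one illustrative
# override; the printed value follows the defaults above.
# config = GPTNeoXJapaneseConfig(hidden_dropout=0.1)
# print(config.hidden_size)  # 2560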
| 147 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses override this with the actual algorithm
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for i in range(self.vertices_count)]
        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
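# Hedged check (added): in the demo graph the only route from entrance 0 to
# exit 3 is 0 -> 1 -> 2 -> 3 with bottleneck min(7, 6, 8) = 6, so the script
# should print "maximum flow is 6".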
| 147 | 1 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
class _a ( BaseImageProcessor ):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = IMAGENET_DEFAULT_MEAN , __UpperCAmelCase = IMAGENET_DEFAULT_STD , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
__A : Dict = size if size is not None else {"shortest_edge": 224}
__A : Dict = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
__A : Any = crop_size if crop_size is not None else {"height": 224, "width": 224}
__A : Any = get_size_dict(__UpperCAmelCase , param_name="crop_size" )
__A : Dict = do_resize
__A : Tuple = size
__A : Any = resample
__A : Union[str, Any] = do_center_crop
__A : List[Any] = crop_size
__A : Tuple = do_rescale
__A : List[str] = rescale_factor
__A : Any = do_normalize
__A : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__A : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ):
__A : List[Any] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__A : List[str] = int((256 / 224) * size["shortest_edge"] )
__A : Dict = get_resize_output_image_size(__UpperCAmelCase , size=__UpperCAmelCase , default_to_square=__UpperCAmelCase )
__A : Optional[Any] = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
__UpperCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
__A : Union[str, Any] = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(__UpperCAmelCase , size=(size["height"], size["width"]) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
__A : Tuple = do_resize if do_resize is not None else self.do_resize
__A : Optional[Any] = resample if resample is not None else self.resample
__A : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
__A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__A : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
__A : Tuple = image_mean if image_mean is not None else self.image_mean
__A : Union[str, Any] = image_std if image_std is not None else self.image_std
__A : Dict = size if size is not None else self.size
__A : Dict = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
__A : List[Any] = crop_size if crop_size is not None else self.crop_size
__A : List[str] = get_size_dict(__UpperCAmelCase , param_name="crop_size" )
__A : str = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
__A : List[str] = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
__A : Dict = [self.resize(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for image in images]
if do_center_crop:
__A : Optional[Any] = [self.center_crop(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
if do_rescale:
__A : List[str] = [self.rescale(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
if do_normalize:
__A : Dict = [self.normalize(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for image in images]
__A : Optional[Any] = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
__A : Tuple = {"pixel_values": images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
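# Hedged usage sketch (added): illustrative only; it assumes the obfuscated
# method names above are restored to BaseImageProcessor's conventional
# resize/center_crop/rescale/normalize/preprocess names. With the defaults the
# shortest edge maps to 224 and the output is center-cropped to 224x224.
# processor = _a()
# batch = processor(images=pil_image, return_tensors="pt")  # pixel_values: (1, 3, 224, 224)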
| 713 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 387 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
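# Hedged usage sketch (added): defaults mirror the signature above.
# config = MgpstrConfig()
# print(config.hidden_size, config.max_token_length)  # 768 27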
| 688 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ['a', 'b', 'c']

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ['c'])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(['a', 'c'], None, stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ['a', 'c'])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(('a', 'b'), (0, 1), ['a', 'b'])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 1), ['a'])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ['a', 'b'])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ['a'])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0,), ['a', 'b', 'c'])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['a', 'b'], (0, 2), ['a', 'b', 'c'])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['b', 'a'], (0, 1), ['a', 'b'])

        # Check passes with valid inputs
        verify_out_features_out_indices(['a', 'b', 'd'], (0, 1, -1), ['a', 'b', 'c', 'd'])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ['a', 'b', 'c']
        backbone._out_features = ['a', 'c']
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ['a', 'b']
        self.assertEqual(backbone.out_features, ['a', 'b'])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ['a', 'c'])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 688 | 1 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    """simple docstring"""

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
snake_case__ : str = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 709 |
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    '''simple docstring'''
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
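    # Hedged usage sketch (added): the side lengths are illustrative.
    # check_polygon([6, 10, 5]) -> True (10 < 6 + 5)
    # check_polygon([3, 7, 13, 2]) -> False (13 >= 3 + 7 + 2)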
| 553 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 525 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __A ( a_ : int ,a_ : str=0.9_9_9 ,a_ : List[str]="cosine" ,):
if alpha_transform_type == "cosine":
def alpha_bar_fn(a_ : List[Any] ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(a_ : List[str] ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
lowerCAmelCase : Optional[int] = []
for i in range(a_ ):
lowerCAmelCase : Dict = i / num_diffusion_timesteps
lowerCAmelCase : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a_ ) / alpha_bar_fn(a_ ) ,a_ ) )
return torch.tensor(a_ ,dtype=torch.floataa )
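# Hedged check (added): e.g. betas_for_alpha_bar(5) returns five betas that
# increase monotonically under the squared-cosine alpha_bar above.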
class lowerCamelCase ( _A , _A ):
snake_case_ = [e.name for e in KarrasDiffusionSchedulers]
snake_case_ = 2
@register_to_config
def __init__( self , a_ = 1_000 , a_ = 0.00085 , a_ = 0.012 , a_ = "linear" , a_ = None , a_ = "epsilon" , a_ = False , a_ = False , a_ = 1.0 , a_ = "linspace" , a_ = 0 , ):
if trained_betas is not None:
lowerCAmelCase : List[Any] = torch.tensor(a_ , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCAmelCase : List[str] = torch.linspace(a_ , a_ , a_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCAmelCase : List[str] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCAmelCase : List[Any] = betas_for_alpha_bar(a_ , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
lowerCAmelCase : Any = betas_for_alpha_bar(a_ , alpha_transform_type="exp" )
else:
raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
lowerCAmelCase : Optional[int] = 1.0 - self.betas
lowerCAmelCase : Optional[Any] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(a_ , a_ , a_ )
lowerCAmelCase : Union[str, Any] = use_karras_sigmas
def _lowerCamelCase ( self , a_ , a_=None ):
if schedule_timesteps is None:
lowerCAmelCase : List[str] = self.timesteps
lowerCAmelCase : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCAmelCase : Union[str, Any] = 1 if len(a_ ) > 1 else 0
else:
lowerCAmelCase : Tuple = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep
lowerCAmelCase : Any = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self , a_ , a_ , ):
lowerCAmelCase : Union[str, Any] = self.index_for_timestep(a_ )
lowerCAmelCase : Any = self.sigmas[step_index]
lowerCAmelCase : int = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self , a_ , a_ = None , a_ = None , ):
lowerCAmelCase : List[Any] = num_inference_steps
lowerCAmelCase : Optional[int] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCAmelCase : Optional[Any] = np.linspace(0 , num_train_timesteps - 1 , a_ , dtype=a_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCAmelCase : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase : Dict = (np.arange(0 , a_ ) * step_ratio).round()[::-1].copy().astype(a_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCAmelCase : Tuple = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCAmelCase : int = (np.arange(a_ , 0 , -step_ratio )).round().copy().astype(a_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCAmelCase : Any = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCAmelCase : Any = np.log(a_ )
lowerCAmelCase : int = np.interp(a_ , np.arange(0 , len(a_ ) ) , a_ )
if self.config.use_karras_sigmas:
lowerCAmelCase : int = self._convert_to_karras(in_sigmas=a_ , num_inference_steps=self.num_inference_steps )
lowerCAmelCase : Any = np.array([self._sigma_to_t(a_ , a_ ) for sigma in sigmas] )
lowerCAmelCase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCAmelCase : int = torch.from_numpy(a_ ).to(device=a_ )
lowerCAmelCase : Any = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCAmelCase : Union[str, Any] = torch.from_numpy(a_ )
lowerCAmelCase : Union[str, Any] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(a_ ).startswith("mps" ):
# mps does not support float64
lowerCAmelCase : str = timesteps.to(a_ , dtype=torch.floataa )
else:
lowerCAmelCase : int = timesteps.to(device=a_ )
# empty dt and derivative
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Optional[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCAmelCase : int = defaultdict(a_ )
def _lowerCamelCase ( self , a_ , a_ ):
# get log sigma
lowerCAmelCase : str = np.log(a_ )
# get distribution
lowerCAmelCase : int = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCAmelCase : int = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCAmelCase : Any = low_idx + 1
lowerCAmelCase : str = log_sigmas[low_idx]
lowerCAmelCase : Optional[Any] = log_sigmas[high_idx]
# interpolate sigmas
lowerCAmelCase : Optional[Any] = (low - log_sigma) / (low - high)
lowerCAmelCase : List[str] = np.clip(a_ , 0 , 1 )
# transform interpolation to time range
lowerCAmelCase : int = (1 - w) * low_idx + w * high_idx
lowerCAmelCase : Dict = t.reshape(sigma.shape )
return t
def _lowerCamelCase ( self , a_ , a_ ):
lowerCAmelCase : float = in_sigmas[-1].item()
lowerCAmelCase : float = in_sigmas[0].item()
lowerCAmelCase : str = 7.0 # 7.0 is the value used in the paper
lowerCAmelCase : Optional[int] = np.linspace(0 , 1 , a_ )
lowerCAmelCase : Optional[int] = sigma_min ** (1 / rho)
lowerCAmelCase : str = sigma_max ** (1 / rho)
lowerCAmelCase : List[str] = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowerCamelCase ( self ):
return self.dt is None
def _lowerCamelCase ( self , a_ , a_ , a_ , a_ = True , ):
lowerCAmelCase : Optional[int] = self.index_for_timestep(a_ )
# advance index counter by 1
lowerCAmelCase : List[str] = timestep.cpu().item() if torch.is_tensor(a_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCAmelCase : Optional[Any] = self.sigmas[step_index]
lowerCAmelCase : Dict = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCAmelCase : List[str] = self.sigmas[step_index - 1]
lowerCAmelCase : int = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowerCAmelCase : int = 0
lowerCAmelCase : Optional[int] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCAmelCase : int = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase : Tuple = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase : List[str] = sigma_hat if self.state_in_first_order else sigma_next
lowerCAmelCase : int = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCAmelCase : Optional[int] = model_output
else:
raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `v_prediction`, or `sample`''' )
if self.config.clip_sample:
lowerCAmelCase : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCAmelCase : Union[str, Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCAmelCase : Optional[int] = sigma_next - sigma_hat
# store for 2nd order step
lowerCAmelCase : List[str] = derivative
lowerCAmelCase : Optional[int] = dt
lowerCAmelCase : Dict = sample
else:
# 2. 2nd order / Heun's method
lowerCAmelCase : Dict = (sample - pred_original_sample) / sigma_next
lowerCAmelCase : Optional[int] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCAmelCase : Optional[int] = self.dt
lowerCAmelCase : Tuple = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCAmelCase : Tuple = None
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Any = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=a_ )
def _lowerCamelCase ( self , a_ , a_ , a_ , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowerCAmelCase : Tuple = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(a_ ):
# mps does not support float64
            lowerCAmelCase : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            lowerCAmelCase : Dict = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
lowerCAmelCase : List[Any] = self.timesteps.to(original_samples.device )
lowerCAmelCase : str = timesteps.to(original_samples.device )
lowerCAmelCase : Tuple = [self.index_for_timestep(a_ , a_ ) for t in timesteps]
lowerCAmelCase : Dict = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCAmelCase : Any = sigma.unsqueeze(-1 )
lowerCAmelCase : str = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 525 | 1 |
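The `_convert_to_karras` step in the scheduler above is worth seeing in isolation. Below is a minimal, self-contained numpy sketch of the same rho=7.0 schedule; the `sigma_min`/`sigma_max` values in the demo call are made-up placeholders, not taken from any real model.

import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    # Interpolate linearly in sigma**(1/rho) space, then raise back to the
    # rho-th power, exactly as the scheduler method above does.
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

# Placeholder bounds, for illustration only: yields a decreasing noise schedule.
print(karras_sigmas(sigma_min=0.03, sigma_max=14.6, num_steps=5))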
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = process
_lowerCAmelCase = params
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
_lowerCAmelCase = self.dataset[i]
_lowerCAmelCase = self.process(_lowerCAmelCase , **self.params )
return processed
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
_lowerCAmelCase = loader
_lowerCAmelCase = infer
_lowerCAmelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_lowerCAmelCase = None
_lowerCAmelCase = loader_batch_size
# Internal bookkeeping
_lowerCAmelCase = None
_lowerCAmelCase = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
return self
def __lowerCAmelCase ( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_lowerCAmelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_lowerCAmelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Convert ModelOutput to tuple first
_lowerCAmelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_lowerCAmelCase , _lowerCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_lowerCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_lowerCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_lowerCAmelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowerCAmelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_lowerCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_lowerCAmelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_lowerCAmelCase = self._loader_batch_data.__class__(_lowerCAmelCase )
self._loader_batch_index += 1
return result
def __lowerCAmelCase ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_lowerCAmelCase = next(self.iterator )
_lowerCAmelCase = self.infer(_lowerCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCAmelCase = processed
else:
_lowerCAmelCase = list(processed.keys() )[0]
_lowerCAmelCase = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = len(_lowerCAmelCase )
else:
_lowerCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase = observed_batch_size
# Setting internal index to unwrap the batch
_lowerCAmelCase = processed
_lowerCAmelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
_lowerCAmelCase = None
return self
def __lowerCAmelCase ( self ):
if self.subiterator is None:
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_lowerCAmelCase = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
_lowerCAmelCase = next(self.subiterator )
return processed
class UpperCAmelCase ( snake_case_ ):
def __iter__( self ):
_lowerCAmelCase = iter(self.loader )
return self
def __lowerCAmelCase ( self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
_lowerCAmelCase = False
_lowerCAmelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase = self.loader_batch_item()
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
while not is_last:
_lowerCAmelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_lowerCAmelCase , torch.Tensor ):
_lowerCAmelCase = processed
else:
_lowerCAmelCase = list(processed.keys() )[0]
_lowerCAmelCase = processed[key]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = len(_lowerCAmelCase )
else:
_lowerCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_lowerCAmelCase = observed_batch_size
_lowerCAmelCase = processed
_lowerCAmelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_lowerCAmelCase = self.loader_batch_item()
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
if is_last:
return accumulator
else:
_lowerCAmelCase = processed
_lowerCAmelCase = item.pop('''is_last''' )
accumulator.append(_lowerCAmelCase )
return accumulator
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = key
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
return self.dataset[i][self.key]
class UpperCAmelCase ( snake_case_ ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = dataset
_lowerCAmelCase = keya
_lowerCAmelCase = keya
def __len__( self ):
return len(self.dataset )
def __getitem__( self , _lowerCAmelCase ):
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]} | 664 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCAmelCase ( snake_case_ ):
def __lowerCAmelCase ( self ):
_lowerCAmelCase = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase = '''pt'''
_lowerCAmelCase = '''tf'''
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowerCAmelCase )
model_tf.save_pretrained(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''mock_framework'''
# Framework provided - return whatever the user provides
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
def __lowerCAmelCase ( self ):
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowerCAmelCase )
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(_lowerCAmelCase )
def __lowerCAmelCase ( self ):
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowerCAmelCase , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
_lowerCAmelCase = MagicMock(return_value=_lowerCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , _lowerCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , _lowerCAmelCase ):
with self.assertRaises(_lowerCAmelCase ):
_lowerCAmelCase = FeaturesManager.determine_framework(self.test_model ) | 664 | 1 |
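The tests above pin down a priority order for framework detection. A hedged, illustrative reduction of that logic (not the transformers implementation) looks like this:

def pick_framework(user_choice=None, has_pt_weights=False, has_tf_weights=False,
                   torch_available=True, tf_available=True):
    # 1. An explicit framework argument always wins.
    if user_choice is not None:
        return user_choice
    # 2. A local checkpoint's saved weights decide next.
    if has_pt_weights:
        return "pt"
    if has_tf_weights:
        return "tf"
    # 3. Otherwise fall back to the environment, preferring PyTorch.
    if torch_available:
        return "pt"
    if tf_available:
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow is installed.")

assert pick_framework(user_choice="mock_framework") == "mock_framework"
assert pick_framework(torch_available=False) == "tf"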
def _lowerCamelCase( __snake_case ) -> int:
assert (
isinstance(__snake_case , __snake_case ) and number_of_steps > 0
), f"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
if number_of_steps == 1:
return 1
__snake_case , __snake_case = 1, 1
for _ in range(number_of_steps - 1 ):
__snake_case , __snake_case = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
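The function above is the classic "climbing stairs" count (a Fibonacci-shaped recurrence, O(n) time, O(1) space). Since this dump renamed the def, the usage sketch below uses a hypothetical name:

def climb_stairs(number_of_steps: int) -> int:
    # Ways to climb number_of_steps stairs taking 1 or 2 steps at a time.
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        previous, current = current, current + previous
    return current

assert [climb_stairs(n) for n in range(1, 7)] == [1, 2, 3, 5, 8, 13]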
| 524 | from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 524 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
snake_case : Union[str, Any] = TypeVar('''T''')
snake_case : List[Any] = Union[List[T], Tuple[T, ...]]
snake_case : int = Union[T, List[T], Dict[str, T]]
snake_case : Tuple = Union[str, bytes, os.PathLike]
| 717 |
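The aliases above lost their names to this dump's renaming; a hedged reconstruction with readable names shows what they are for (a generic one-or-many container and a path-like union):

import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]

def flatten(values: NestedDataStructureLike[int]) -> List[int]:
    # Accept a bare value, a list of values, or a dict of values.
    if isinstance(values, dict):
        return list(values.values())
    if isinstance(values, list):
        return values
    return [values]

assert flatten(3) == flatten([3]) == flatten({"a": 3}) == [3]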
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
        params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,beta1=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
a__ = list(map(self ,scheduler.lr_lambdas ) )
| 657 | 0 |
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def lowerCamelCase_ ( UpperCamelCase__ : str = "" ):
'''simple docstring'''
UpperCamelCase__ = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
UpperCamelCase__ = BeautifulSoup(requests.get(UpperCamelCase__ ).text, '''html.parser''' )
UpperCamelCase__ = soup.find_all('''td''', attrs='''titleColumn''' )
UpperCamelCase__ = soup.find_all('''td''', class_='''ratingColumn imdbRating''' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(UpperCamelCase__, UpperCamelCase__ )
}
def lowerCamelCase_ ( UpperCamelCase__ : str = "IMDb_Top_250_Movies.csv" ):
'''simple docstring'''
UpperCamelCase__ = get_imdb_top_aaa_movies()
with open(UpperCamelCase__, '''w''', newline='''''' ) as out_file:
UpperCamelCase__ = csv.writer(UpperCamelCase__ )
writer.writerow(['''Movie title''', '''IMDb rating'''] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
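For context, the first helper above returns a plain `{title: rating}` dict (its def name was destroyed by this dump), so downstream use is ordinary dict work; the sample data below is made up:

# Hypothetical scraper output; real values come from the IMDb chart page.
top_movies = {"The Shawshank Redemption": 9.3, "The Godfather": 9.2, "12 Angry Men": 9.0}

for title, rating in sorted(top_movies.items(), key=lambda kv: kv[1], reverse=True):
    print(f"{rating:.1f}  {title}")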
| 240 | import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowerCamelCase_ ( UpperCamelCase__ : int ):
'''simple docstring'''
    UpperCamelCase__ = checkpoints.load_t5x_checkpoint(UpperCamelCase__ )
UpperCamelCase__ = flatten_dict(UpperCamelCase__ )
return flax_params
def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ = {}
UpperCamelCase__ = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
UpperCamelCase__ = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
UpperCamelCase__ = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
UpperCamelCase__ = new_key.replace(UpperCamelCase__, UpperCamelCase__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
UpperCamelCase__ = new_key.replace(UpperCamelCase__, UpperCamelCase__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
UpperCamelCase__ = re.sub(r'''layers_(\d+)''', r'''layer.\1''', UpperCamelCase__ )
UpperCamelCase__ = new_key.replace('''encoder''', '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
UpperCamelCase__ = re.sub(r'''layers_(\d+)''', r'''layer.\1''', UpperCamelCase__ )
UpperCamelCase__ = flax_dict[key]
UpperCamelCase__ = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
UpperCamelCase__ = torch.from_numpy(converted_dict[key].T )
else:
UpperCamelCase__ = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def lowerCamelCase_ ( UpperCamelCase__ : int, UpperCamelCase__ : Any, UpperCamelCase__ : Tuple=False, UpperCamelCase__ : List[str]=False ):
'''simple docstring'''
UpperCamelCase__ = get_flax_param(UpperCamelCase__ )
if not use_large:
UpperCamelCase__ = PixaStructVisionConfig()
UpperCamelCase__ = PixaStructTextConfig()
else:
UpperCamelCase__ = PixaStructVisionConfig(
hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18 )
UpperCamelCase__ = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18 )
UpperCamelCase__ = PixaStructConfig(
vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=UpperCamelCase__ )
UpperCamelCase__ = PixaStructForConditionalGeneration(UpperCamelCase__ )
UpperCamelCase__ = rename_and_convert_flax_params(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
UpperCamelCase__ = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
UpperCamelCase__ = PixaStructImageProcessor()
UpperCamelCase__ = PixaStructProcessor(image_processor=UpperCamelCase__, tokenizer=UpperCamelCase__ )
if use_large:
UpperCamelCase__ = 4096
UpperCamelCase__ = True
# mkdir if needed
os.makedirs(UpperCamelCase__, exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
print('''Model saved in {}'''.format(UpperCamelCase__ ) )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
lowercase = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 240 | 1 |
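The regex renaming step in the conversion above is the subtle part: `layers_(\d+)` captures the T5X layer index and `layer.\1` re-emits it in PyTorch's dotted convention. A tiny demo with illustrative keys:

import re

keys = ["encoder.layers_0.attention.query.kernel", "decoder.layers_11.mlp.wi.kernel"]
print([re.sub(r"layers_(\d+)", r"layer.\1", k) for k in keys])
# ['encoder.layer.0.attention.query.kernel', 'decoder.layer.11.mlp.wi.kernel']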
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = VideoToVideoSDPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''}
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''}
_lowerCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
_lowerCamelCase = False
# No `output_type`.
_lowerCamelCase = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def lowerCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
_UpperCAmelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase , set_alpha_to_one=lowerCamelCase , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
_UpperCAmelCase = CLIPTextModel(lowerCamelCase )
_UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_UpperCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Any=0 ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
if str(lowerCamelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(lowerCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
_UpperCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""video""": video,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def lowerCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = VideoToVideoSDPipeline(**lowerCamelCase )
_UpperCAmelCase = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(lowerCamelCase )
_UpperCAmelCase = """np"""
_UpperCAmelCase = sd_pipe(**lowerCamelCase ).frames
_UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
_UpperCAmelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase , expected_max_diff=5E-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCamelCase ( self : int ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
def lowerCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
        _UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.float16 )
pipe.enable_model_cpu_offload()
# 10 frames
_UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
_UpperCAmelCase = torch.randn((1, 10, 3, 1024, 576) , generator=lowerCamelCase )
_UpperCAmelCase = video.to("""cuda""" )
_UpperCAmelCase = """Spiderman is surfing"""
_UpperCAmelCase = pipe(lowerCamelCase , video=lowerCamelCase , generator=lowerCamelCase , num_inference_steps=3 , output_type="""pt""" ).frames
_UpperCAmelCase = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2 | 719 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_lowerCamelCase = BlenderbotSmallConfig
_lowerCamelCase = {}
_lowerCamelCase = '''gelu'''
def __init__( self : int , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=13 , lowerCamelCase : Optional[Any]=7 , lowerCamelCase : Dict=True , lowerCamelCase : List[str]=False , lowerCamelCase : List[Any]=99 , lowerCamelCase : Tuple=32 , lowerCamelCase : List[str]=2 , lowerCamelCase : Tuple=4 , lowerCamelCase : List[Any]=37 , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : Optional[int]=20 , lowerCamelCase : Any=2 , lowerCamelCase : Union[str, Any]=1 , lowerCamelCase : int=0 , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def lowerCamelCase ( self : Any ) -> int:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_blenderbot_small_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, inputs_dict
def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : int ) -> int:
"""simple docstring"""
_UpperCAmelCase = TFBlenderbotSmallModel(config=lowerCamelCase ).get_decoder()
_UpperCAmelCase = inputs_dict["""input_ids"""]
_UpperCAmelCase = input_ids[:1, :]
_UpperCAmelCase = inputs_dict["""attention_mask"""][:1, :]
_UpperCAmelCase = inputs_dict["""head_mask"""]
_UpperCAmelCase = 1
# first forward pass
_UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , head_mask=lowerCamelCase , use_cache=lowerCamelCase )
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
# append to next input_ids and
_UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase )[0]
_UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , past_key_values=lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
_UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase , lowerCamelCase , rtol=1E-3 )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , ) -> int:
if attention_mask is None:
        _UpperCAmelCase = tf.cast(tf.math.not_equal(__snake_case , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
_UpperCAmelCase = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
_UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
_lowerCamelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
def lowerCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = TFBlenderbotSmallModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=lowerCamelCase )
def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
_lowerCamelCase = '''facebook/blenderbot_small-90M'''
@cached_property
def lowerCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
@cached_property
def lowerCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def lowerCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer(self.src_text , return_tensors="""tf""" )
_UpperCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowerCamelCase , )
_UpperCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCamelCase )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
) | 402 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase_ = logging.get_logger(__name__)
class __A ( A ):
'''simple docstring'''
def __init__(self , *A , **A ) -> None:
"""simple docstring"""
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 11 |
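The class above follows the standard deprecation-shim pattern: subclass the replacement, warn once in `__init__`, and delegate everything else. A generic hedged sketch of the same pattern:

import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    # Deprecated alias kept only for backward compatibility.
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

extractor = OldFeatureExtractor(size=128)  # warns, then behaves like the new class
assert extractor.size == 128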
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class __A( a ):
def __lt__( self , _snake_case ) -> Dict:
'''simple docstring'''
return self[-1] < other[-1]
def __eq__( self , _snake_case ) -> Dict:
'''simple docstring'''
return self[-1] == other[-1]
def __lowerCAmelCase ( a__ ) -> list:
__a = []
# sort into stacks
for element in collection:
__a = Stack([element] )
__a = bisect_left(a__ , a__ )
if i != len(a__ ):
stacks[i].append(a__ )
else:
stacks.append(a__ )
# use a heap-based merge to merge stack efficiently
    __a = merge(*(reversed(stack ) for stack in stacks) )
return collection
if __name__ == "__main__":
A : List[Any] = input('Enter numbers separated by a comma:\n').strip()
A : Any = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted)) | 219 | 0 |
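A worked trace of the stack phase in `patience_sort` above: each element lands on the leftmost stack whose top is greater than or equal to it, otherwise it opens a new stack, and merging the reversed stacks restores sorted order. The stand-alone sketch below mirrors that logic with plain lists:

from bisect import bisect_left
from heapq import merge

def patience_stacks(collection):
    stacks = []  # kept ordered by each stack's last element
    for element in collection:
        i = bisect_left([s[-1] for s in stacks], element)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append([element])
    return stacks

stacks = patience_stacks([5, 1, 4, 2, 3])
print(stacks)                                       # [[5, 1], [4, 2], [3]]
print(list(merge(*(reversed(s) for s in stacks))))  # [1, 2, 3, 4, 5]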
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : UNetaDModel
__lowerCamelCase : ScoreSdeVeScheduler
def __init__( self : Tuple , __lowercase : UNetaDModel , __lowercase : ScoreSdeVeScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__lowercase , scheduler=__lowercase )
@torch.no_grad()
def __call__( self : Any , __lowercase : int = 1 , __lowercase : int = 2000 , __lowercase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , **__lowercase : Any , ):
'''simple docstring'''
__a = self.unet.config.sample_size
__a = (batch_size, 3, img_size, img_size)
__a = self.unet
__a = randn_tensor(__lowercase , generator=__lowercase ) * self.scheduler.init_noise_sigma
__a = sample.to(self.device )
self.scheduler.set_timesteps(__lowercase )
self.scheduler.set_sigmas(__lowercase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__a = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__a = self.unet(__lowercase , __lowercase ).sample
__a = self.scheduler.step_correct(__lowercase , __lowercase , generator=__lowercase ).prev_sample
# prediction step
__a = model(__lowercase , __lowercase ).sample
__a = self.scheduler.step_pred(__lowercase , __lowercase , __lowercase , generator=__lowercase )
__a , __a = output.prev_sample, output.prev_sample_mean
__a = sample_mean.clamp(0 , 1 )
__a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(__lowercase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__lowercase )
| 547 |
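The sampling loop above is a predictor-corrector scheme: a few Langevin-style corrector steps per noise level, then one predictor step of the reverse SDE. A framework-free skeleton (all callables are placeholders, not the diffusers API):

def sample_pc(score, correct, predict, x, sigmas, correct_steps=1):
    # Predictor-corrector skeleton: correct, then predict, per noise level.
    for sigma in sigmas:
        for _ in range(correct_steps):
            x = correct(x, score(x, sigma))     # Langevin-style correction
        x = predict(x, score(x, sigma), sigma)  # reverse-SDE predictor step
    return x

# Toy run with dummy callables, just to show the control flow.
out = sample_pc(score=lambda x, s: -x,
                correct=lambda x, g: x + 0.1 * g,
                predict=lambda x, g, s: x + 0.1 * g,
                x=1.0, sigmas=[1.0, 0.5, 0.1])
print(out)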
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , __lowercase : int , __lowercase : str=13 , __lowercase : Tuple=7 , __lowercase : int=True , __lowercase : Optional[int]=True , __lowercase : List[str]=True , __lowercase : List[str]=True , __lowercase : Any=99 , __lowercase : int=32 , __lowercase : Optional[int]=2 , __lowercase : List[str]=4 , __lowercase : int=37 , __lowercase : Optional[int]="gelu" , __lowercase : Any=0.1 , __lowercase : List[Any]=0.1 , __lowercase : int=512 , __lowercase : str=16 , __lowercase : str=2 , __lowercase : Optional[Any]=0.02 , __lowercase : str=3 , __lowercase : str=4 , __lowercase : str=None , ):
'''simple docstring'''
__a = parent
__a = 13
__a = 7
__a = True
__a = True
__a = True
__a = True
__a = 99
__a = 32
__a = 2
__a = 4
__a = 37
__a = """gelu"""
__a = 0.1
__a = 0.1
__a = 512
__a = 16
__a = 2
__a = 0.02
__a = 3
__a = 4
__a = None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Tuple , __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Any , __lowercase : Tuple , __lowercase : int , __lowercase : List[Any] , __lowercase : List[Any] ):
'''simple docstring'''
__a = TFRoFormerModel(config=__lowercase )
__a = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__a = [input_ids, input_mask]
__a = model(__lowercase )
__a = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Any , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : int , __lowercase : Any , __lowercase : str , __lowercase : Optional[int] ):
'''simple docstring'''
__a = True
__a = TFRoFormerForCausalLM(config=__lowercase )
__a = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__a = model(__lowercase )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : int , __lowercase : List[str] , __lowercase : str ):
'''simple docstring'''
__a = TFRoFormerForMaskedLM(config=__lowercase )
__a = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__a = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Dict , __lowercase : List[str] , __lowercase : Optional[int] ):
'''simple docstring'''
__a = self.num_labels
__a = TFRoFormerForSequenceClassification(config=__lowercase )
__a = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__a = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Any , __lowercase : Union[str, Any] , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : int , __lowercase : Tuple , __lowercase : int , __lowercase : Any ):
'''simple docstring'''
__a = self.num_choices
__a = TFRoFormerForMultipleChoice(config=__lowercase )
__a = tf.tile(tf.expand_dims(__lowercase , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(__lowercase , 1 ) , (1, self.num_choices, 1) )
__a = tf.tile(tf.expand_dims(__lowercase , 1 ) , (1, self.num_choices, 1) )
__a = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
__a = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase_ ( self : List[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : str , __lowercase : int , __lowercase : Dict , __lowercase : List[str] ):
'''simple docstring'''
__a = self.num_labels
__a = TFRoFormerForTokenClassification(config=__lowercase )
__a = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__a = model(__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : List[Any] , __lowercase : Dict , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Any , __lowercase : str , __lowercase : Dict ):
'''simple docstring'''
__a = TFRoFormerForQuestionAnswering(config=__lowercase )
__a = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__a = model(__lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) = config_and_inputs
__a = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[int] =(
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
__lowerCamelCase : Optional[int] =(
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowerCamelCase : Optional[int] =False
__lowerCamelCase : Tuple =False
def UpperCamelCase_ ( self : Any , __lowercase : int , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : Tuple ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def setUp( self : int ):
'''simple docstring'''
self.model_tester = TFRoFormerModelTester(self )
self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
def test_config( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def test_model( self : List[Any] ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_masked_lm( self : str ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def test_lm_head( self : Dict ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*config_and_inputs )
def test_for_multiple_choice( self : List[Any] ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
def test_for_question_answering( self : Optional[Any] ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self : List[Any] ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def test_for_token_classification( self : Dict ):
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self : List[Any] ):
'''simple docstring'''
model = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest ( unittest.TestCase ):
@slow
def test_inference_masked_lm( self : int ):
'''simple docstring'''
model = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
output = model(input_ids )[0]
# TODO Replace vocab size
vocab_size = 50000
expected_shape = [1, 6, vocab_size]
self.assertEqual(output.shape , expected_shape )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
expected_slice = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest ( unittest.TestCase ):
tolerance =1e-4
def test_basic( self : str ):
'''simple docstring'''
input_ids = tf.constant([[4, 10]] )
emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
emb = emb1(input_ids.shape )
desired_weights = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )
def test_positional_emb_weights_against_roformer( self : str ):
'''simple docstring'''
desired_weights = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emb1([2, 16, 512] )
weights = emb1.weight[:3, :5]
tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest ( unittest.TestCase ):
tolerance =1e-4
def test_apply_rotary_position_embeddings( self : str ):
'''simple docstring'''
# 2,12,16,64
query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
sinusoidal_pos , query_layer , key_layer )
desired_query_layer = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
desired_key_layer = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , desired_query_layer , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , desired_key_layer , atol=self.tolerance )
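# Sanity observation (an editorial note, not part of the original test): the rotary
# transform is linear in q/k, and key_layer was initialized as the exact negation of
# query_layer, which is why the desired key tensor above is the elementwise negation
# of the desired query tensor.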
| 547 | 1 |
import operator
def strand_sort ( arr: list , reverse: bool = False , solution: list | None = None ) -> list:
"""simple docstring"""
_operator = operator.lt if reverse else operator.gt
solution = solution or []
if not arr:
return solution
sublist = [arr.pop(0 )]
for i, item in enumerate(arr ):
if _operator(item , sublist[-1] ):
sublist.append(item )
arr.pop(i )
# merging sublist into solution list
if not solution:
solution.extend(sublist )
else:
while sublist:
item = sublist.pop(0 )
for i, xx in enumerate(solution ):
if not _operator(item , xx ):
solution.insert(i , item )
break
else:
solution.append(item )
strand_sort(arr , reverse , solution )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
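# A short trace to make the strand/merge flow concrete (this is the worked example
# from the first assert above, stepped through by hand):
# [4, 3, 5, 1, 2] -> strand [4, 5], remaining [3, 1, 2], solution [4, 5]
# [3, 1, 2]       -> strand [3],    remaining [1, 2],    solution [3, 4, 5]
# [1, 2]          -> strand [1, 2], remaining [],        solution [1, 2, 3, 4, 5]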
| 641 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser ( subparsers: List[Any]=None ) -> Union[str, Any]:
"""simple docstring"""
if subparsers is not None:
parser = subparsers.add_parser("""test""" )
else:
parser = argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" , default=None , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=test_command )
return parser
def test_command ( args: Union[str, Any] ) -> Any:
"""simple docstring"""
script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
test_args = script_name
else:
test_args = f'--config_file={args.config_file} {script_name}'
cmd = ["""accelerate-launch"""] + test_args.split()
result = execute_subprocess_async(cmd , env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def main ( ) -> List[Any]:
"""simple docstring"""
parser = test_command_parser()
args = parser.parse_args()
test_command(args )
if __name__ == "__main__":
main()
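# Hedged usage note (the invocation shown is illustrative): once accelerate is
# installed, this module backs the CLI subcommand
#   accelerate test --config_file /path/to/default_config.yaml
# which launches test_script.py through `accelerate-launch` as done above.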
| 641 | 1 |
"""simple docstring"""
class SubArray :
def __init__( self : List[Any] ,arr : Dict ):
'''simple docstring'''
# we need a list not a string, so do something to change the type
self.array = arr.split(""",""" )
def solve_sub_array( self : Dict ):
'''simple docstring'''
rear = [int(self.array[0] )] * len(self.array )
sum_value = [int(self.array[0] )] * len(self.array )
for i in range(1 ,len(self.array ) ):
sum_value[i] = max(
int(self.array[i] ) + sum_value[i - 1] ,int(self.array[i] ) )
rear[i] = max(sum_value[i] ,rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
__UpperCAmelCase = input('please input some numbers:')
__UpperCAmelCase = SubArray(whole_array)
__UpperCAmelCase = array.solve_sub_array()
print(('the results is:', re))
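# Example runs (illustrative): entering "1,2,3" prints 6, since the best contiguous
# subarray is the whole list; entering "-1,2,-3" prints 2, since the singleton [2]
# beats any subarray containing a negative neighbor.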
| 194 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key ( orig_key ):
'''simple docstring'''
if "model" in orig_key:
orig_key = orig_key.replace("""model.""" , """""" )
if "norm1" in orig_key:
orig_key = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" )
if "norm2" in orig_key:
orig_key = orig_key.replace("""norm2""" , """output.LayerNorm""" )
if "norm" in orig_key:
orig_key = orig_key.replace("""norm""" , """LayerNorm""" )
if "transformer" in orig_key:
layer_num = orig_key.split(""".""" )[0].split("""_""" )[-1]
orig_key = orig_key.replace(F"transformer_{layer_num}" , F"encoder.layer.{layer_num}" )
if "mha.attn" in orig_key:
orig_key = orig_key.replace("""mha.attn""" , """attention.self""" )
if "mha" in orig_key:
orig_key = orig_key.replace("""mha""" , """attention""" )
if "W_q" in orig_key:
orig_key = orig_key.replace("""W_q""" , """self.query""" )
if "W_k" in orig_key:
orig_key = orig_key.replace("""W_k""" , """self.key""" )
if "W_v" in orig_key:
orig_key = orig_key.replace("""W_v""" , """self.value""" )
if "ff1" in orig_key:
orig_key = orig_key.replace("""ff1""" , """intermediate.dense""" )
if "ff2" in orig_key:
orig_key = orig_key.replace("""ff2""" , """output.dense""" )
if "ff" in orig_key:
orig_key = orig_key.replace("""ff""" , """output.dense""" )
if "mlm_class" in orig_key:
orig_key = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" )
if "mlm" in orig_key:
orig_key = orig_key.replace("""mlm""" , """cls.predictions.transform""" )
if "cls" not in orig_key:
orig_key = """yoso.""" + orig_key
return orig_key
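# A minimal sanity check of the renaming rules above (the key string is
# illustrative, not read from a real checkpoint):
assert rename_key("""model.transformer_0.mha.W_q.weight""" ) == """yoso.encoder.layer.0.attention.self.query.weight"""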
def convert_checkpoint_helper ( max_position_embeddings , orig_state_dict ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
orig_state_dict[rename_key(key )] = val
orig_state_dict["""cls.predictions.bias"""] = orig_state_dict["""cls.predictions.decoder.bias"""]
orig_state_dict["""yoso.embeddings.position_ids"""] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
return orig_state_dict
def convert_yoso_checkpoint ( checkpoint_path , yoso_config_file , pytorch_dump_path ):
'''simple docstring'''
orig_state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model_state_dict"""]
config = YosoConfig.from_json_file(yoso_config_file )
model = YosoForMaskedLM(config )
new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
print(model.load_state_dict(new_state_dict ) )
model.eval()
model.save_pretrained(pytorch_dump_path )
print(F"Checkpoint successfully converted. Model saved at {pytorch_dump_path}" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCAmelCase = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 194 | 1 |
'''simple docstring'''
def count_divisors ( n ):
n_divisors = 1
i = 2
while i * i <= n:
multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def solution ( ):
t_num = 1
i = 1
while True:
i += 1
t_num += i
if count_divisors(t_num ) > 5_0_0:
break
return t_num
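# Quick sanity check of the divisor-counting helper: 28 = 2^2 * 7, so it has
# (2 + 1) * (1 + 1) = 6 divisors (1, 2, 4, 7, 14, 28).
assert count_divisors(28) == 6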
if __name__ == "__main__":
print(solution())
| 195 |
'''simple docstring'''
def nand_gate ( input_1 , input_2 ):
return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate ( ):
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
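# NAND is functionally complete; for instance NOT(a) can be built as
# nand_gate(a, a), which the diagonal of the truth table above already shows:
assert nand_gate(1 , 1 ) == 0 and nand_gate(0 , 0 ) == 1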
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 195 | 1 |
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
def lowerCamelCase_ ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[Any] ) -> List[str]:
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['torch'] )
def lowerCamelCase_ ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : str ) -> Optional[Any]:
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['torch'] )
def lowerCamelCase_ ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['torch'] )
def lowerCamelCase_ ( *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['torch'] )
def lowerCamelCase_ ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[str] ) -> Dict:
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['torch'] )
def lowerCamelCase_ ( *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['torch'] )
def lowerCamelCase_ ( *UpperCamelCase__ : int , **UpperCamelCase__ : Dict ) -> Tuple:
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['torch'] )
class __lowerCAmelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case_ = ['''torch''']
def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str:
'''simple docstring'''
requires_backends(cls , ['torch'] )
@classmethod
def lowercase_ ( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ['torch'] )
| 711 |
UNIVERSAL_GAS_CONSTANT = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def pressure_of_gas_system ( moles: float , kelvin: float , volume: float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system ( moles: float , kelvin: float , pressure: float ) -> float:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('Invalid inputs. Enter positive value.' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
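# Worked check against the ideal gas law PV = nRT (numbers are illustrative):
# 1 mol at 300 K in a 0.0224 m^3 vessel gives
# P = 1 * 300 * 8.314462 / 0.0224 ≈ 1.11e5 Pa, i.e. slightly above atmospheric pressure.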
| 167 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor (ProcessorMixin ):
feature_extractor_class ='''Speech2TextFeatureExtractor'''
tokenizer_class ='''Speech2TextTokenizer'''
def __init__( self , feature_extractor , tokenizer) -> Dict:
super().__init__(feature_extractor , tokenizer)
self.current_processor =self.feature_extractor
self._in_target_context_manager =False
def __call__( self , *args , **kwargs) -> List[str]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args , **kwargs)
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
audio =kwargs.pop('raw_speech')
else:
audio =kwargs.pop('audio' , None)
sampling_rate =kwargs.pop('sampling_rate' , None)
text =kwargs.pop('text' , None)
if len(args) > 0:
audio =args[0]
args =args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.')
if audio is not None:
inputs =self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
if text is not None:
encodings =self.tokenizer(text , **kwargs)
if text is None:
return inputs
elif audio is None:
return encodings
else:
inputs['labels'] =encodings['input_ids']
return inputs
def batch_decode( self , *args , **kwargs) -> List[str]:
return self.tokenizer.batch_decode(*args , **kwargs)
def decode( self , *args , **kwargs) -> List[Any]:
return self.tokenizer.decode(*args , **kwargs)
@contextmanager
def as_target_processor( self) -> Dict:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.')
self._in_target_context_manager =True
self.current_processor =self.tokenizer
yield
self.current_processor =self.feature_extractor
self._in_target_context_manager =False
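# Hypothetical usage sketch (checkpoint id and waveform variable are illustrative):
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# inputs = processor(audio=waveform, sampling_rate=16000, text="a transcript")
# `inputs` then carries the audio features plus a "labels" key from the tokenizer.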
| 20 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class TextClassification ( TaskTemplate ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
task : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
input_schema : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
label_schema : ClassVar[Features] = Features({'''labels''': ClassLabel} )
text_column : str = "text"
label_column : str = "labels"
def align_with_features( self , features ):
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , ClassLabel ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
task_template = copy.deepcopy(self )
label_schema = self.label_schema.copy()
label_schema['''labels'''] = features[self.label_column]
task_template.__dict__['''label_schema'''] = label_schema
return task_template
@property
def column_mapping( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
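# Hypothetical usage sketch (feature and label names are illustrative):
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# task = TextClassification().align_with_features(features)
# task.column_mapping  ->  {"text": "text", "labels": "labels"}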
| 362 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
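# Hedged CLI sketch (flag values are illustrative; the exact flags come from the
# InitializationArguments dataclass defined in the accompanying arguments module):
# python initialize_model.py --config_name gpt2-large --model_name my-codeparrot-model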
| 262 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling ( data ) -> tuple:
return (data["data"], data["target"])
def xgboost ( features , target , test_features ) -> np.ndarray:
xgb = XGBRegressor(verbosity=0 , random_state=4_2 )
xgb.fit(features , target )
# Predict target for test data
predictions = xgb.predict(test_features )
predictions = predictions.reshape(len(predictions ) , 1 )
return predictions
def main ( ) -> None:
housing = fetch_california_housing()
data , target = data_handling(housing )
x_train , x_test , y_train , y_test = train_test_split(
data , target , test_size=0.2_5 , random_state=1 )
predictions = xgboost(x_train , y_train , x_test )
# Error printing
print(F"Mean Absolute Error : {mean_absolute_error(y_test , predictions )}" )
print(F"Mean Square Error : {mean_squared_error(y_test , predictions )}" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
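# A hedged extension sketch (attribute name per the scikit-learn estimator API):
# after fitting, `xgb.feature_importances_` exposes per-feature importances, which
# could be printed alongside the two error metrics above to explain the model.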
| 262 | 1 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCAmelCase_ : Union[str, Any] = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCAmelCase_ : Dict = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos ( key_lines , sys_lines , NP_only=False , remove_nested=False , keep_singletons=True , min_span=False , doc="dummy_doc" ):
key_doc_lines ={doc: key_lines}
sys_doc_lines ={doc: sys_lines}
doc_coref_infos ={}
key_nested_coref_num =0
sys_nested_coref_num =0
key_removed_nested_clusters =0
sys_removed_nested_clusters =0
key_singletons_num =0
sys_singletons_num =0
key_clusters , singletons_num =reader.get_doc_mentions(doc , key_doc_lines[doc] , keep_singletons )
key_singletons_num += singletons_num
if NP_only or min_span:
key_clusters =reader.set_annotated_parse_trees(key_clusters , key_doc_lines[doc] , NP_only , min_span )
sys_clusters , singletons_num =reader.get_doc_mentions(doc , sys_doc_lines[doc] , keep_singletons )
sys_singletons_num += singletons_num
if NP_only or min_span:
sys_clusters =reader.set_annotated_parse_trees(sys_clusters , key_doc_lines[doc] , NP_only , min_span )
if remove_nested:
nested_mentions , removed_clusters =reader.remove_nested_coref_mentions(key_clusters , keep_singletons )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
nested_mentions , removed_clusters =reader.remove_nested_coref_mentions(sys_clusters , keep_singletons )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
sys_mention_key_cluster =reader.get_mention_assignments(sys_clusters , key_clusters )
key_mention_sys_cluster =reader.get_mention_assignments(key_clusters , sys_clusters )
doc_coref_infos[doc] =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"""files, respectively""" )
return doc_coref_infos
def evaluate ( key_lines , sys_lines , metrics , NP_only , remove_nested , keep_singletons , min_span ):
doc_coref_infos =get_coref_infos(key_lines , sys_lines , NP_only , remove_nested , keep_singletons , min_span )
output_scores ={}
conll =0
conll_subparts_num =0
for name, metric in metrics:
recall , precision , f1 =evaluator.evaluate_documents(doc_coref_infos , metric , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += f1
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": f1} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {f1 * 100:.2f}" , )
if conll_subparts_num == 3:
conll =(conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
                else:
                    break
    return has_gold_parse

@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
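
# Editor's note: a minimal, self-contained sketch of the aggregation done in `evaluate`
# above — the CoNLL score is simply the mean of the MUC, B-cubed and CEAFe F1 values,
# rescaled to 0-100. The helper name is invented for this example.
def _conll_score_sketch(muc_f1: float, bcub_f1: float, ceafe_f1: float) -> float:
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100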
| 21 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))

class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1

class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_execution_device(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
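
# Editor's note: a minimal usage sketch of the hook API exercised above, assuming
# accelerate is installed. `ScaleOutputHook` and `_hook_usage_sketch` are invented for
# this example; they are not part of accelerate.
class ScaleOutputHook(ModelHook):
    def post_forward(self, module, output):
        # Halve whatever the wrapped module returns.
        return output / 2


def _hook_usage_sketch():
    model = ModelForTest()
    add_hook_to_module(model, ScaleOutputHook())
    return model(torch.randn(2, 3))  # the forward pass now runs through the hook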
| 21 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
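
# Editor's note: a short usage sketch; the helper name is invented for this example. With
# the defaults above, "gated-gelu" is remapped to the "gelu_new" activation in __init__.
def _umt5_config_sketch():
    config = UMT5Config()
    assert config.dense_act_fn == "gelu_new" and config.is_gated_act
    assert config.hidden_size == config.d_model
    return config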
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 57 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 57 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs

@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
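
# Editor's note: a minimal usage sketch for the processor under test, assuming the vision
# extras of transformers are installed; the helper name is invented for this example.
def _chinese_clip_processor_sketch():
    image_processor = ChineseCLIPImageProcessor(crop_size={"height": 18, "width": 18})
    image = Image.fromarray(np.random.randint(255, size=(30, 30, 3), dtype=np.uint8))
    return image_processor(image, return_tensors="pt").pixel_values  # shape (1, 3, 18, 18)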
| 104 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
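
# Editor's note: a usage sketch, assuming the `invisible-watermark` package providing
# `imwatermark` is installed. Images narrower than 256 px are returned unchanged above.
def _watermark_usage_sketch():
    watermarker = StableDiffusionXLWatermarker()
    images = torch.rand(1, 3, 512, 512) * 2 - 1  # fake image batch in [-1, 1]
    return watermarker.apply_watermark(images)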
| 432 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
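
# Editor's note: a usage sketch, assuming 16 kHz mono input. With the defaults above, one
# 30 s chunk yields a (feature_size, nb_max_frames) = (80, 3000) log-mel matrix.
def _whisper_feature_extraction_sketch():
    feature_extractor = WhisperFeatureExtractor()
    raw_speech = np.zeros(16_000, dtype=np.float32)  # one second of silence
    inputs = feature_extractor(raw_speech, sampling_rate=16_000, return_tensors="np")
    return inputs.input_features  # shape (1, 80, 3000)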
| 81 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config

def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name

def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")

if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
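
# Editor's note: typical invocation, shown as a comment. The script filename below is an
# assumption (it follows the usual transformers naming convention for conversion scripts).
#
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny \
#       --push_to_hub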
| 81 | 1 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transforms a snake_case string to camelCase (or PascalCase if indicated)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
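
# Editor's note: quick usage examples for the converter above.
#
#   snake_to_camel_case("some_random_string")                   -> "someRandomString"
#   snake_to_camel_case("some_random_string", use_pascal=True)  -> "SomeRandomString"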
| 60 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
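
# Editor's note: with the `_LazyModule` installed above, heavy submodules are imported
# only on first attribute access, e.g. (sketch):
#
#   from transformers.models.roberta_prelayernorm import RobertaPreLayerNormConfig
#
# resolves `configuration_roberta_prelayernorm` at that point rather than at package import.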
| 531 | 0 |
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample Gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
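
# Editor's note: a usage sketch, assuming diffusers is installed and a compatible
# checkpoint is available; the repo id below is illustrative, not prescriptive.
#
#   pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   images = pipeline(batch_size=1, num_inference_steps=50).images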
| 720 |
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
A_ = "src/transformers"
A_ = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->',
        end_prompt='<!--End of the generated tip-->',
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), 'w', encoding='utf-8', newline='\n') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ' to fix this.')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 384 | 0 |
def odd_even_sort(input_list: list) -> list:
    '''simple docstring'''
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
input_list = [int(x) for x in input().split()]
# inputing elements of the list in one line
sorted_list = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
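# Example (doctest-style):
#   odd_even_sort([4, 1, 7, 4, 2, 6])  # -> [1, 2, 4, 4, 6, 7]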
| 205 |
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 1_00
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_00
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # a century year is a leap year only when divisible by 400, hence `!= 0`
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
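# Spot checks:
#   get_week_day(2021, 1, 1)   # -> 'Friday'
#   get_week_day(2000, 2, 29)  # -> 'Tuesday' (2000 is a leap year: divisible by 400)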
| 683 | 0 |
import unittest
from knapsack import greedy_knapsack as kp
class __a ( unittest.TestCase ):
    def test_sorted(self):
        '''simple docstring'''
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_negative_weight_value(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, 'Weight can not be negative.')

    def test_negative_profit_value(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, 'Profit can not be negative.')

    def test_null_max_weight(self):
        '''simple docstring'''
        self.assertRaisesRegex(ValueError, 'max_weight must greater than zero.')

    def test_unequal_list_length(self):
        '''simple docstring'''
        self.assertRaisesRegex(
            ValueError, 'The length of profit and weight must be same.')
if __name__ == "__main__":
unittest.main()
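# Worked check for test_sorted: all items fit, since the total weight
# 2+4+6+8+10+12 = 42 <= 100, so the greedy profit equals the full sum
# 10+20+30+40+50+60 = 210.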
| 720 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> dict:
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
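# Example: every byte gets a printable stand-in so BPE merges operate on
# visible symbols; the space byte, for instance, is remapped:
#   bytes_to_unicode()[ord(" ")]  # -> 'Ġ' (chr(256 + 32))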
def get_pairs(word) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs) -> None:
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.encoder)

    def get_vocab(self) -> dict:
        '''simple docstring'''
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
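    # Worked example (hypothetical ranks): with bpe_ranks {('l', 'o'): 0,
    # ('lo', 'w'): 1}, bpe('low') merges ('l', 'o') first, then ('lo', 'w'),
    # returning 'low' as a single symbol.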
    def _tokenize(self, text):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        '''simple docstring'''
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
    def _pad(self, encoded_inputs, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None) -> dict:
        '''simple docstring'''
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))
        return encoded_inputs
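    # Illustration of the right-padding branch in _pad above:
    #   input_ids             = [0, 9, 2, 1, 1]   # already padded to length 5
    #   global_attention_mask = [1, 0, 0]         # length 3 before padding
    #   -> global_attention_mask becomes [1, 0, 0, -1, -1]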
| 335 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria(ABC):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        '''simple docstring'''
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        '''simple docstring'''
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        '''simple docstring'''
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all.")
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        '''simple docstring'''
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        '''simple docstring'''
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        '''simple docstring'''
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        '''simple docstring'''
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        '''simple docstring'''
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        '''simple docstring'''
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
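# Usage sketch:
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   criteria(input_ids, scores)               # True once input_ids.shape[-1] >= 20
#   validate_stopping_criteria(criteria, 20)  # returns a safe deep copy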
| 204 |
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
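# Spot check: solution(1, 10) == 7, since 1/7 = 0.(142857) has the longest
# recurring cycle (length 6) among denominators below 10. For the full range,
# solution() is expected to be 983 (Project Euler 26).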
| 204 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname)
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self ):
snake_case_ , snake_case_ = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=UpperCAmelCase_ )
snake_case_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = 4
snake_case_ = jax.device_count()
snake_case_ = num_samples * [prompt]
snake_case_ = pipeline.prepare_inputs(UpperCAmelCase_ )
# shard inputs and rng
snake_case_ = replicate(UpperCAmelCase_ )
snake_case_ = jax.random.split(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = shard(UpperCAmelCase_ )
snake_case_ = pipeline(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , jit=UpperCAmelCase_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(UpperCAmelCase_ , dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
snake_case_ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCAmelCase_ ) == num_samples
def _lowercase ( self ):
snake_case_ , snake_case_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=UpperCAmelCase_ )
snake_case_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = 50
snake_case_ = jax.device_count()
snake_case_ = num_samples * [prompt]
snake_case_ = pipeline.prepare_inputs(UpperCAmelCase_ )
# shard inputs and rng
snake_case_ = replicate(UpperCAmelCase_ )
snake_case_ = jax.random.split(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = shard(UpperCAmelCase_ )
snake_case_ = pipeline(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , jit=UpperCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase_ , dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def _lowercase ( self ):
snake_case_ , snake_case_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase_ )
snake_case_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = 50
snake_case_ = jax.device_count()
snake_case_ = num_samples * [prompt]
snake_case_ = pipeline.prepare_inputs(UpperCAmelCase_ )
# shard inputs and rng
snake_case_ = replicate(UpperCAmelCase_ )
snake_case_ = jax.random.split(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = shard(UpperCAmelCase_ )
snake_case_ = pipeline(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , jit=UpperCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase_ , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def _lowercase ( self ):
snake_case_ , snake_case_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
snake_case_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = 50
snake_case_ = jax.device_count()
snake_case_ = num_samples * [prompt]
snake_case_ = pipeline.prepare_inputs(UpperCAmelCase_ )
# shard inputs and rng
snake_case_ = replicate(UpperCAmelCase_ )
snake_case_ = jax.random.split(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = shard(UpperCAmelCase_ )
snake_case_ = pipeline(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , jit=UpperCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase_ , dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def _lowercase ( self ):
snake_case_ = FlaxDDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=UpperCAmelCase_ , steps_offset=1 , )
snake_case_ , snake_case_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , )
snake_case_ = scheduler.create_state()
snake_case_ = scheduler_state
snake_case_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
snake_case_ = jax.random.PRNGKey(0 )
snake_case_ = 50
snake_case_ = jax.device_count()
snake_case_ = num_samples * [prompt]
snake_case_ = pipeline.prepare_inputs(UpperCAmelCase_ )
# shard inputs and rng
snake_case_ = replicate(UpperCAmelCase_ )
snake_case_ = jax.random.split(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case_ = shard(UpperCAmelCase_ )
snake_case_ = pipeline(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , jit=UpperCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(UpperCAmelCase_ , dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def _lowercase ( self ):
snake_case_ = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
snake_case_ = jax.device_count()
snake_case_ = num_samples * [prompt]
snake_case_ = jax.random.split(jax.random.PRNGKey(0 ) , UpperCAmelCase_ )
snake_case_ , snake_case_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase_ , )
snake_case_ = replicate(UpperCAmelCase_ )
snake_case_ = pipeline.prepare_inputs(UpperCAmelCase_ )
snake_case_ = shard(UpperCAmelCase_ )
snake_case_ = pipeline(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , jit=UpperCAmelCase_ ).images
assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
slice = images[2, 0, 2_56, 10:17, 1]
# With memory efficient attention
snake_case_ , snake_case_ = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase_ , use_memory_efficient_attention=UpperCAmelCase_ , )
snake_case_ = replicate(UpperCAmelCase_ )
snake_case_ = pipeline.prepare_inputs(UpperCAmelCase_ )
snake_case_ = shard(UpperCAmelCase_ )
snake_case_ = pipeline(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , jit=UpperCAmelCase_ ).images
assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
slice_eff = images_eff[2, 0, 2_56, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 420 |
'''simple docstring'''
def partition(m: int) -> int:
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
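# Worked check: partition(5) == 7, matching the seven integer partitions of 5:
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.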
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowercase__ = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
lowercase__ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 420 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    '''simple docstring'''

    def __init__(self, initial_capacity: int = 6) -> None:
        '''simple docstring'''
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        '''simple docstring'''
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        '''simple docstring'''
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self):
        '''simple docstring'''
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data) -> None:
        '''simple docstring'''
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self):
        '''simple docstring'''
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        '''simple docstring'''
        if self.is_empty():
            raise Exception('Empty Queue')

    def check_is_full(self) -> None:
        '''simple docstring'''
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')


class Node:
    '''simple docstring'''

    def __init__(self) -> None:
        '''simple docstring'''
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
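# Usage example:
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue('a'); queue.enqueue('b')
#   queue.dequeue()  # -> 'a'
#   queue.dequeue()  # -> 'b'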
| 28 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """simple docstring"""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
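# Usage sketch (version strings are placeholders):
#   deprecate("old_arg", "999.0.0", "Use `new_arg` instead.")               # warns
#   val = deprecate("old_arg", "999.0.0", "msg", take_from={"old_arg": 5})  # pops and returns 5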
| 28 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger()
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Tuple = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCAmelCase__ )
def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Any:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _A ( self :List[Any] ) -> Tuple:
'''simple docstring'''
return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 1
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
a__ = True
def __call__( self :List[str] , lowerCAmelCase__ :Tensor ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized
snake_case_ : Optional[int] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
snake_case_ : Optional[int] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ) and self.raise_if_mismatch:
raise Exception(
F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while'''
F''' destination module has {len(lowerCAmelCase__ )}.''' )
for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
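# Usage sketch: ModuleTransfer copies weights between two architecturally
# aligned models by tracing both on the same input, e.g.
#   x = torch.randn((1, 3, 224, 224))
#   ModuleTransfer(src=timm_model, dest=hf_model)(x)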
class A_ (nn.Module ):
"""simple docstring"""
def __init__( self :str , lowerCAmelCase__ :nn.Module ) -> str:
'''simple docstring'''
super().__init__()
snake_case_ : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), F'''Unexpected layer name {k}'''
snake_case_ : str = len(lowerCAmelCase__ ) + 1
feature_blocks.append((F'''res{block_index}''', v) )
snake_case_ : str = nn.ModuleDict(lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Tensor ) -> Optional[Any]:
'''simple docstring'''
return get_trunk_forward_outputs(
lowerCAmelCase__ , out_feat_keys=lowerCAmelCase__ , feature_blocks=self._feature_blocks , )
class A_ (a_ ):
"""simple docstring"""
def _A ( self :str , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
snake_case_ : Tuple = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self :Any , lowerCAmelCase__ :str ) -> Callable[[], Tuple[nn.Module, Dict]]:
'''simple docstring'''
if x not in self:
snake_case_ : Optional[Any] = self.convert_name_to_timm(lowerCAmelCase__ )
snake_case_ : Any = partial(lambda: (timm.create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ ).eval(), None) )
else:
snake_case_ : Any = super().__getitem__(lowerCAmelCase__ )
return val
class A_ (a_ ):
"""simple docstring"""
def __getitem__( self :Union[str, Any] , lowerCAmelCase__ :str ) -> Callable[[], nn.Module]:
'''simple docstring'''
if "seer" in x and "in1k" not in x:
snake_case_ : List[Any] = RegNetModel
else:
snake_case_ : Any = RegNetForImageClassification
return val
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[int]:
"""simple docstring"""
for from_key, to_key in keys:
snake_case_ : List[Any] = from_state_dict[from_key].clone()
print(F'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True ,)-> Union[str, Any]:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
snake_case_, snake_case_ : Tuple = from_model_func()
snake_case_ : str = our_model_func(__magic_name__ ).eval()
snake_case_ : List[Any] = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ ,raise_if_mismatch=__magic_name__ )
snake_case_ : List[Any] = torch.randn((1, 3, 224, 224) )
module_transfer(__magic_name__ )
if from_state_dict is not None:
snake_case_ : Optional[int] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
snake_case_ : Tuple = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
snake_case_ : Union[str, Any] = manually_copy_vissl_head(__magic_name__ ,our_model.state_dict() ,__magic_name__ )
our_model.load_state_dict(__magic_name__ )
snake_case_ : str = our_model(__magic_name__ ,output_hidden_states=__magic_name__ )
snake_case_ : Optional[Any] = (
our_outputs.logits if isinstance(__magic_name__ ,__magic_name__ ) else our_outputs.last_hidden_state
)
snake_case_ : Tuple = from_model(__magic_name__ )
snake_case_ : str = from_output[-1] if type(__magic_name__ ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
snake_case_ : Tuple = our_outputs.hidden_states[-1]
assert torch.allclose(__magic_name__ ,__magic_name__ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
snake_case_ : List[str] = 224 if "seer" not in name else 384
# we can use the convnext one
snake_case_ : str = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ,size=__magic_name__ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
print(F'''Pushed {name}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Optional[Any]:
"""simple docstring"""
snake_case_ : int = "imagenet-1k-id2label.json"
snake_case_ : Dict = 1000
snake_case_ : Union[str, Any] = (1, num_labels)
snake_case_ : str = "huggingface/label-files"
snake_case_ : Dict = num_labels
snake_case_ : int = json.load(open(cached_download(hf_hub_url(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ) ,"r" ) )
snake_case_ : int = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : Any = idalabel
snake_case_ : Dict = {v: k for k, v in idalabel.items()}
snake_case_ : str = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ )
snake_case_ : Dict = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] ,hidden_sizes=[24, 56, 152, 368] ,groups_width=8 ,layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] ,hidden_sizes=[32, 64, 160, 384] ,groups_width=16 ,layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] ,hidden_sizes=[48, 96, 240, 528] ,groups_width=24 ,layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] ,hidden_sizes=[64, 128, 288, 672] ,groups_width=16 ,layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] ,hidden_sizes=[72, 168, 408, 912] ,groups_width=24 ,layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] ,hidden_sizes=[96, 192, 432, 1008] ,groups_width=48 ,layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] ,hidden_sizes=[80, 240, 560, 1360] ,groups_width=40 ,layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] ,hidden_sizes=[168, 392, 784, 1624] ,groups_width=56 ,layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] ,hidden_sizes=[80, 240, 720, 1920] ,groups_width=120 ,layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] ,hidden_sizes=[224, 448, 896, 2240] ,groups_width=112 ,layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] ,hidden_sizes=[256, 512, 896, 2048] ,groups_width=128 ,layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] ,hidden_sizes=[336, 672, 1344, 2520] ,groups_width=168 ,layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] ,hidden_sizes=[24, 56, 152, 368] ,groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] ,hidden_sizes=[48, 104, 208, 440] ,groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] ,hidden_sizes=[48, 112, 256, 608] ,groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] ,hidden_sizes=[64, 128, 320, 768] ,groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] ,hidden_sizes=[48, 120, 336, 888] ,groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] ,hidden_sizes=[72, 216, 576, 1512] ,groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] ,hidden_sizes=[128, 192, 512, 1088] ,groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] ,hidden_sizes=[144, 288, 576, 1296] ,groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] ,hidden_sizes=[168, 448, 896, 2016] ,groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] ,hidden_sizes=[224, 448, 896, 2240] ,groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] ,hidden_sizes=[224, 448, 1232, 3024] ,groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] ,hidden_sizes=[232, 696, 1392, 3712] ,groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] ,hidden_sizes=[232, 696, 1392, 3712] ,groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] ,hidden_sizes=[328, 984, 1968, 4920] ,groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] ,hidden_sizes=[528, 1056, 2904, 7392] ,groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] ,hidden_sizes=[640, 1696, 2544, 5088] ,groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] ,hidden_sizes=[2020, 4040, 1_1110, 2_8280] ,groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] ,hidden_sizes=[232, 696, 1392, 3712] ,groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] ,hidden_sizes=[328, 984, 1968, 4920] ,groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] ,hidden_sizes=[528, 1056, 2904, 7392] ,groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] ,hidden_sizes=[640, 1696, 2544, 5088] ,groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] ,hidden_sizes=[2020, 4040, 1_1110, 2_8280] ,groups_width=1010 ),
}
snake_case_ : Any = NameToOurModelFuncMap()
snake_case_ : Tuple = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__magic_name__ ,__magic_name__ ) -> Tuple[nn.Module, Dict]:
snake_case_ : Optional[Any] = torch.hub.load_state_dict_from_url(__magic_name__ ,model_dir=str(__magic_name__ ) ,map_location="cpu" )
snake_case_ : int = model_func()
# check if we have a head, if yes add it
snake_case_ : Union[str, Any] = files["classy_state_dict"]["base_model"]["model"]
snake_case_ : Union[str, Any] = model_state_dict["trunk"]
model.load_state_dict(__magic_name__ )
return model.eval(), model_state_dict["heads"]
# pretrained
snake_case_ : Any = partial(
__magic_name__ ,"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" ,lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) ,)
snake_case_ : Dict = partial(
__magic_name__ ,"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" ,lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) ,)
snake_case_ : Optional[int] = partial(
__magic_name__ ,"https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" ,lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) ,)
snake_case_ : Union[str, Any] = partial(
__magic_name__ ,"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" ,lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 ,group_width=1010 ,w_0=1744 ,w_a=620.83 ,w_m=2.52 ) ) ) ,)
# IN1K finetuned
snake_case_ : str = partial(
__magic_name__ ,"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" ,lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) ,)
snake_case_ : Optional[int] = partial(
__magic_name__ ,"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" ,lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) ,)
snake_case_ : str = partial(
__magic_name__ ,"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" ,lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) ,)
snake_case_ : Union[str, Any] = partial(
__magic_name__ ,"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" ,lambda: FakeRegNetVisslWrapper(
RegNet(RegNetParams(depth=27 ,group_width=1010 ,w_0=1744 ,w_a=620.83 ,w_m=2.52 ) ) ) ,)
if model_name:
convert_weight_and_push(
__magic_name__ ,names_to_from_model_map[model_name] ,names_to_ours_model_map[model_name] ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ ,)
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__magic_name__ ,names_to_from_model_map[model_name] ,names_to_ours_model_map[model_name] ,__magic_name__ ,__magic_name__ ,__magic_name__ ,)
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__lowerCamelCase : Any = parser.parse_args()
__lowerCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 656 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    """simple docstring"""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class A_(TestCase):
    """simple docstring"""

    def test_make_duplicate_clusters(self):
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        '''simple docstring'''
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 656 | 1 |
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-0_8)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
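# Note on forward(): for each query i, the scores are softmaxes over
# similarities between the query's token embeddings and the support tokens
# flagged by the special start/end entity markers, giving per-token
# probabilities of being an entity start or end.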
| 628 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """simple docstring"""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_7_7_7, height: int = 1_8_5_5, digits: int = 8) -> int:
    """simple docstring"""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 1_0**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
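    # Sanity check: a single _modexpt call must agree with Python's built-in
    # three-argument pow, which performs the same modular exponentiation.
    assert _modexpt(1777, 1855, 10**8) == pow(1777, 1855, 10**8)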
| 628 | 1 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """A property whose value is computed once, then cached on the instance."""

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val: str) -> int:
    """Convert a string to 1 (truthy) or 0 (falsy), mirroring distutils.util.strtobool."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """Return True if `x` is a torch/TF/JAX tensor or a numpy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Test whether `x` is a numpy array; safe without any optional framework."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Safe even when torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert tensors/arrays (possibly nested in dicts/lists) to plain Python objects."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert tensors (possibly nested in dicts/lists) to numpy arrays."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """Base class for model outputs: a dict whose values are also reachable as attributes."""

    def __post_init__(self):
        class_fields = fields(self)
        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Return a tuple of all non-None attributes/keys."""
        return tuple(self[k] for k in self.keys())
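# Minimal usage sketch (hypothetical subclass, not part of the original module):
# concrete outputs are dataclasses deriving from ModelOutput, so a field is
# reachable by attribute, by key, and by position, and fields left as None are
# dropped from the mapping.
if __name__ == "__main__":
    from dataclasses import dataclass

    @dataclass
    class ToyOutput(ModelOutput):
        logits: Any = None
        loss: Any = None

    out = ToyOutput(logits=[1, 2, 3])
    assert out.logits == out["logits"] == out[0]
    assert "loss" not in out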
class ExplicitEnum(str, Enum):
    """Enum with a clearer error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}")


class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument of tokenizers."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a list of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a model class can return a loss (i.e. has a `return_loss` argument)."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    """List the label argument names expected by a model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
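# For illustration, flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
# returns {"a": 1, "b.c": 2, "b.d.e": 3}: nested keys are joined with the
# delimiter while leaves are kept as-is.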
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    """Yield a temporary directory if requested, otherwise the given working dir."""
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic transpose for numpy/torch/TF/JAX arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic reshape."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic squeeze."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic expand_dims."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic element count."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix every entry of an `auto_map` with the repo id it comes from."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map


def infer_framework(model_class):
    """Infer whether a model class is TF, PyTorch, or Flax from its MRO."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    raise TypeError(f"Could not infer framework from class {model_class}.")
| 702 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    """Tests for standalone TF generation utility functions."""

    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    """TF-specific runs of the framework-agnostic generation integration tests."""

    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        """An exported tf.function with a fixed input length must match eager generate."""
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        """An exported tf.function with a fixed batch size must match eager generate."""
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_inside_keras_layer_with_tf_tokenizer(self):
        """A layer that tokenizes and generates end-to-end in TF must be Keras-saveable."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read())
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id)
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        """`generate` must accept eos_token_id both as an int and as a list."""
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        sentence = "Hello, my dog is cute and"
        tokens = tokenizer(sentence, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        """Unexpected model kwargs must be filtered out unless the encoder accepts **kwargs."""
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose signature accepts an extra "foo" argument
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 605 | 0 |
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
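# Worked example of the probe formula above: for the sorted collection
# [10, 30, 40, 45, 50, 66, 77, 93] and item 67, the first probe is
#     point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4,
# so the search jumps straight to index 4 (value 50) rather than the middle,
# which is what gives interpolation search its O(log log n) average on
# uniformly distributed keys.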
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive implementation; `left` and `right` bound the current slice."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
return True
if __name__ == "__main__":
import sys
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')

    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print('Not found')
| 135 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold)

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
| 135 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-Means clustering of `vectors` (rows of equal length) using TensorFlow 1.x."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        # tf.subtract replaces the long-removed tf.sub alias
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()  # tf.global_variables_initializer() in newer TF1

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
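# A minimal usage sketch. Assumptions: TensorFlow 1.x APIs are available (e.g.
# via `import tensorflow.compat.v1 as tf; tf.disable_v2_behavior()`), and the
# input rows are float64 so their dtype matches the float64 centroid
# placeholder above (numpy's randn produces float64 by default).
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    toy_vectors = rng.randn(20, 2)  # 20 points in 2-D
    toy_vectors[10:] += 5.0         # shift half of them to form a second blob
    centroids, assignments = TFKMeansCluster(toy_vectors, 2)
    print(centroids)    # two cluster centres
    print(assignments)  # a 0/1 cluster label per point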
| 705 |
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Three fixed priority levels (0 is highest); each level holds at most 100 items."""

    def __init__(self) -> None:
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Return the oldest item from the highest non-empty priority level."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """The element's value is its priority: dequeue always removes the smallest."""

    def __init__(self) -> None:
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
| 584 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None  # first node of the list
        self.tail = None  # last node, which points back to the head

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self):
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """
    Exercise the circular linked list operations end to end.

    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """Builds tiny Funnel configs/inputs and checks the shapes of model outputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
UpperCamelCase_ :Tuple = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase_ :Optional[int] = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
def setUp( self ):
self.model_tester = TFFunnelModelTester(self )
self.config_tester = ConfigTester(self , config_class=FunnelConfig )
def test_config( self ):
self.config_tester.run_common_tests()
def test_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_pretraining( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
def test_for_masked_lm( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def test_for_token_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def test_for_question_answering( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@require_tf
class TFFunnelBaseModelTest ( TFModelTesterMixin , unittest.TestCase ):
all_model_classes = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
test_head_masking = False
test_onnx = False
def setUp( self ):
self.model_tester = TFFunnelModelTester(self , base=True )
self.config_tester = ConfigTester(self , config_class=FunnelConfig )
def test_config( self ):
self.config_tester.run_common_tests()
def test_base_model( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*config_and_inputs )
def test_for_sequence_classification( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def test_for_multiple_choice( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
| 668 | 1 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor ( ProcessorMixin ):
'''simple docstring'''
attributes = ['image_processor', 'tokenizer']
image_processor_class = 'BlipImageProcessor'
tokenizer_class = 'AutoTokenizer'
def __init__( self , image_processor , tokenizer , qformer_tokenizer) ->None:
"""simple docstring"""
super().__init__(image_processor , tokenizer)
# add QFormer tokenizer
self.qformer_tokenizer = qformer_tokenizer
def __call__( self , images : ImageInput = None , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_token_type_ids : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) ->BatchFeature:
"""simple docstring"""
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""")
encoding = BatchFeature()
if text is not None:
text_encoding = self.tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
encoding.update(text_encoding)
qformer_text_encoding = self.qformer_tokenizer(
text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
encoding["""qformer_input_ids"""] = qformer_text_encoding.pop("""input_ids""")
encoding["""qformer_attention_mask"""] = qformer_text_encoding.pop("""attention_mask""")
if images is not None:
image_encoding = self.image_processor(images , return_tensors=return_tensors)
encoding.update(image_encoding)
return encoding
def batch_decode( self , *args , **kwargs) ->List[str]:
"""simple docstring"""
return self.tokenizer.batch_decode(*args , **kwargs)
def decode( self , *args , **kwargs) ->List[Any]:
"""simple docstring"""
return self.tokenizer.decode(*args , **kwargs)
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def model_input_names( self):
"""simple docstring"""
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
def save_pretrained( self , save_directory , **kwargs) ->str:
"""simple docstring"""
if os.path.isfile(save_directory):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
os.makedirs(save_directory , exist_ok=True)
qformer_tokenizer_path = os.path.join(save_directory , """qformer_tokenizer""")
self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
return super().save_pretrained(save_directory , **kwargs)
@classmethod
def from_pretrained( cls , pretrained_model_name_or_path , **kwargs) ->"InstructBlipProcessor":
"""simple docstring"""
qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path , subfolder="""qformer_tokenizer""")
args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path , **kwargs)
args.append(qformer_tokenizer)
return cls(*args)
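# A minimal usage sketch for this processor pattern (image processor + LM
# tokenizer + Q-Former tokenizer). The checkpoint name and image path are
# illustrative assumptions, not taken from this module.
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.open("photo.jpg").convert("RGB")
inputs = processor(images=image, text="What is shown here?", return_tensors="pt")
# The returned BatchFeature holds pixel_values, input_ids/attention_mask from the
# main tokenizer, and qformer_input_ids/qformer_attention_mask from the Q-Former tokenizer.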
| 15 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc , acceptor_conc , intrinsic_conc , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
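# Worked example for the formula above, V_bi = (k_B * T / q) * ln(N_D * N_A / n_i^2),
# with illustrative concentrations (cm^-3): N_D = N_A = 1e17 and n_i = 1e10 at T = 300 K
# gives roughly 0.02585 V * ln(1e14) ≈ 0.02585 * 32.24 ≈ 0.833 V.
assert abs(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10) - 0.8334) < 1e-3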
| 15 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester (unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
'''simple docstring'''
size = size if size is not None else {'height': 18, 'width': 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.apply_ocr = apply_ocr
def prepare_image_processor_dict( self):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest (ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def setUp( self):
'''simple docstring'''
self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
@property
def image_processor_dict( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self):
'''simple docstring'''
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing , 'do_resize'))
self.assertTrue(hasattr(image_processing , 'size'))
self.assertTrue(hasattr(image_processing , 'apply_ocr'))
def test_image_processor_from_dict_with_kwargs( self):
'''simple docstring'''
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 18, 'width': 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__A : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image)
# Test not batched input
__A : int = image_processing(image_inputs[0] , return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , _UpperCAmelCase)
self.assertIsInstance(encoding.boxes , _UpperCAmelCase)
# Test batched
__A : str = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray)
# Test not batched input
__A : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__A : Any = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__A : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor)
# Test not batched input
__A : Tuple = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__A : int = image_processing(_UpperCAmelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
__A : str = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test')
__A : List[Any] = Image.open(ds[0]['file']).convert('RGB')
__A : Optional[int] = image_processing(_UpperCAmelCase , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__A : str = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__A : Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _UpperCAmelCase)
self.assertListEqual(encoding.boxes , _UpperCAmelCase)
# with apply_OCR = False
__A : str = LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase)
__A : List[str] = image_processing(_UpperCAmelCase , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
| 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
'''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_m2m_100'''] = [
'''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''M2M100ForConditionalGeneration''',
'''M2M100Model''',
'''M2M100PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_m2m_100 import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
M2M100ForConditionalGeneration,
M2M100Model,
M2M100PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
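# A short translation sketch with the public classes this init file exposes;
# the checkpoint name is an illustrative assumption, not taken from this module.
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
encoded = tokenizer("La vie est belle.", return_tensors="pt")
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))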
| 209 | 0 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest( TestCase ):
def test_no_type( self ):
"""simple docstring"""
arr = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.int64() )
def test_array_type_forbidden( self ):
"""simple docstring"""
with self.assertRaises(ValueError ):
_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.int64() )
def test_try_type_and_type_forbidden( self ):
"""simple docstring"""
with self.assertRaises(ValueError ):
_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) )
def test_compatible_type( self ):
"""simple docstring"""
arr = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.int32() )
def test_incompatible_type( self ):
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_ = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) )
def test_try_compatible_type( self ):
"""simple docstring"""
arr = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) )
self.assertEqual(arr.type , pa.int32() )
def test_try_incompatible_type( self ):
"""simple docstring"""
arr = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) )
self.assertEqual(arr.type , pa.string() )
def test_compatible_extension_type( self ):
"""simple docstring"""
arr = pa.array(TypedSequence([[[1, 2, 3]]] , type=Array2D((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , Array2DExtensionType((1, 3) , "int64" ) )
def test_incompatible_extension_type( self ):
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_ = pa.array(TypedSequence(["foo", "bar"] , type=Array2D((1, 3) , "int64" ) ) )
def test_try_compatible_extension_type( self ):
"""simple docstring"""
arr = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=Array2D((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , Array2DExtensionType((1, 3) , "int64" ) )
def test_try_incompatible_extension_type( self ):
"""simple docstring"""
arr = pa.array(TypedSequence(["foo", "bar"] , try_type=Array2D((1, 3) , "int64" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def test_exhaustive_cast( self ):
"""simple docstring"""
import PIL.Image
pil_image = PIL.Image.fromarray(np.arange(10 , dtype=np.uint8 ).reshape(2 , 5 ) )
with patch(
"datasets.arrow_writer.cast_to_python_objects" , side_effect=cast_to_python_objects ) as mock_cast_to_python_objects:
_ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image() ) )
args , kwargs = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("optimize_list_casting" , kwargs )
self.assertFalse(kwargs["optimize_list_casting"] )
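# A compact sketch of the contract the tests above exercise: `type` is strict
# (incompatible values raise), while `try_type` silently falls back to the
# inferred type.
import pyarrow as pa
from datasets.arrow_writer import TypedSequence
from datasets.features import Value

assert pa.array(TypedSequence([1, 2, 3], type=Value("int32"))).type == pa.int32()
assert pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64"))).type == pa.string()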
def _check_output( output , expected_num_chunks ) -> None:
stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
f = pa.ipc.open_stream(stream )
pa_table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
UpperCamelCase_ = pa.BufferOutputStream()
UpperCamelCase_ = pa.schema(UpperCamelCase_ ) if fields else None
with ArrowWriter(stream=UpperCamelCase_ , schema=UpperCamelCase_ , writer_batch_size=UpperCamelCase_ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
UpperCamelCase_ , UpperCamelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCamelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowerCAmelCase_ ( ) -> str:
UpperCamelCase_ = pa.BufferOutputStream()
UpperCamelCase_ = Features({"labels": ClassLabel(names=["neg", "pos"] )} )
with ArrowWriter(stream=UpperCamelCase_ , features=UpperCamelCase_ ) as writer:
writer.write({"labels": 0} )
writer.write({"labels": 1} )
UpperCamelCase_ , UpperCamelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
UpperCamelCase_ = pa.BufferReader(output.getvalue() )
UpperCamelCase_ = pa.ipc.open_stream(UpperCamelCase_ )
UpperCamelCase_ = f.read_all()
UpperCamelCase_ = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(UpperCamelCase_ )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Any:
UpperCamelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase_ , writer_batch_size=UpperCamelCase_ , hash_salt="split_name" , check_duplicates=UpperCamelCase_ , ) as writer:
with pytest.raises(UpperCamelCase_ ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
UpperCamelCase_ , UpperCamelCase_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
UpperCamelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase_ , writer_batch_size=UpperCamelCase_ , hash_salt="split_name" , check_duplicates=UpperCamelCase_ , ) as writer:
with pytest.raises(UpperCamelCase_ ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
UpperCamelCase_ , UpperCamelCase_ = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Any:
UpperCamelCase_ = pa.BufferOutputStream()
with ArrowWriter(
stream=UpperCamelCase_ , writer_batch_size=UpperCamelCase_ , hash_salt="split_name" , check_duplicates=UpperCamelCase_ , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
UpperCamelCase_ , UpperCamelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
UpperCamelCase_ = pa.BufferOutputStream()
UpperCamelCase_ = pa.schema(UpperCamelCase_ ) if fields else None
with ArrowWriter(stream=UpperCamelCase_ , schema=UpperCamelCase_ , writer_batch_size=UpperCamelCase_ ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
UpperCamelCase_ , UpperCamelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCamelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Any:
UpperCamelCase_ = pa.BufferOutputStream()
UpperCamelCase_ = pa.schema(UpperCamelCase_ ) if fields else None
with ArrowWriter(stream=UpperCamelCase_ , schema=UpperCamelCase_ , writer_batch_size=UpperCamelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
UpperCamelCase_ , UpperCamelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCamelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
UpperCamelCase_ = pa.BufferOutputStream()
UpperCamelCase_ = pa.schema(UpperCamelCase_ ) if fields else None
with ArrowWriter(stream=UpperCamelCase_ , schema=UpperCamelCase_ , writer_batch_size=UpperCamelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
UpperCamelCase_ , UpperCamelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCamelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(UpperCamelCase_ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def lowerCAmelCase_ ( ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()}
UpperCamelCase_ = os.path.join(UpperCamelCase_ , "test.arrow" )
with ArrowWriter(path=UpperCamelCase_ , schema=pa.schema(UpperCamelCase_ ) ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
UpperCamelCase_ , UpperCamelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(UpperCamelCase_ , metadata=writer._schema.metadata )
_check_output(UpperCamelCase_ , 1 )
def get_base_dtype( arr_type ):
if pa.types.is_list(arr_type ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def change_first_primitive_element_in_list( lst , value ):
if isinstance(lst[0] , list ):
change_first_primitive_element_in_list(lst[0] , value )
else:
lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
UpperCamelCase_ = pa.array(TypedSequence(UpperCamelCase_ , optimized_int_type=UpperCamelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"col, expected_dtype" , [
("attention_mask", pa.int8()),
("special_tokens_mask", pa.int8()),
("token_type_ids", pa.int8()),
("input_ids", pa.int32()),
("other", pa.int64()),
] , )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_typed_sequence( sequence , col , expected_dtype ):
# in range
arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
sequence = copy.deepcopy(sequence )
value = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(sequence , value )
arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
assert get_base_dtype(arr.type ) == pa.int64()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
UpperCamelCase_ = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=UpperCamelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Optional[int]:
UpperCamelCase_ = "mock://dataset-train.arrow"
with ArrowWriter(path=UpperCamelCase_ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(UpperCamelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
UpperCamelCase_ , UpperCamelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(UpperCamelCase_ )
def lowerCAmelCase_ ( ) -> Optional[int]:
UpperCamelCase_ = pa.BufferOutputStream()
with ParquetWriter(stream=UpperCamelCase_ ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
UpperCamelCase_ , UpperCamelCase_ = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
UpperCamelCase_ = pa.BufferReader(output.getvalue() )
UpperCamelCase_ = pq.read_table(UpperCamelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files" , [False, True] )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
import PIL.Image
UpperCamelCase_ = str(tmp_path / "test_image_rgb.jpg" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(UpperCamelCase_ , format="png" )
UpperCamelCase_ = pa.BufferOutputStream()
with ParquetWriter(
stream=UpperCamelCase_ , features=Features({"image": Image()} ) , embed_local_files=UpperCamelCase_ ) as writer:
writer.write({"image": image_path} )
writer.finalize()
UpperCamelCase_ = pa.BufferReader(output.getvalue() )
UpperCamelCase_ = pq.read_table(UpperCamelCase_ )
UpperCamelCase_ = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["image"][0]["path"] , UpperCamelCase_ )
with open(UpperCamelCase_ , "rb" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def lowerCAmelCase_ ( ) -> int:
UpperCamelCase_ = pa.schema([pa.field("col_1" , pa.string() , nullable=UpperCamelCase_ )] )
UpperCamelCase_ = pa.BufferOutputStream()
with ArrowWriter(stream=UpperCamelCase_ ) as writer:
writer._build_writer(inferred_schema=UpperCamelCase_ )
assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
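# A minimal end-to-end sketch of the ArrowWriter API exercised by the tests
# above: write two examples to an in-memory stream, then read the table back.
import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

output = pa.BufferOutputStream()
with ArrowWriter(stream=output) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}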
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class OpenAIGPTConfig ( PretrainedConfig ):
model_type = '''openai-gpt'''
attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , vocab_size=40478 , n_positions=512 , n_embd=768 , n_layer=12 , n_head=12 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
"""simple docstring"""
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.afn = afn
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
super().__init__(**kwargs )
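# A short sketch of using the config above: the defaults reproduce the original
# openai-gpt architecture, and attribute_map lets the generic attribute names
# resolve to the GPT-specific ones.
config = OpenAIGPTConfig()
assert config.n_embd == config.hidden_size == 768   # aliased via attribute_map
assert config.n_layer == config.num_hidden_layers == 12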
| 371 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy( saved_model_path ,strict ,opset ):
'''simple docstring'''
saved_model = SavedModel()
onnx_ops = []
with open(os.path.join(REPO_PATH ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f:
onnx_opsets = json.load(f )["""opsets"""]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(i )] )
with open(saved_model_path ,"""rb""" ) as f:
saved_model.ParseFromString(f.read() )
model_op_names = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
model_op_names = sorted(model_op_names )
incompatible_ops = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(op )
if strict and len(incompatible_ops ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops ) )
elif len(incompatible_ops ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*incompatible_ops ,sep="""\n""" )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
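# Example invocation, assuming this script lives at utils/check_tf_ops.py (the
# filename is an assumption) and is run from the repository root so the relative
# utils/tf_ops/onnx.json lookup above resolves:
#
#   python utils/check_tf_ops.py --saved_model_path path/to/saved_model.pb --opset 12 --strict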
| 569 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester ( ConfigTester ):
def create_and_test_config_common_properties ( self ):
config = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(config ,"""hidden_sizes""" ) )
self.parent.assertTrue(hasattr(config ,"""num_attention_heads""" ) )
self.parent.assertTrue(hasattr(config ,"""num_encoder_blocks""" ) )
class SegformerModelTester :
def __init__( self ,parent ,batch_size=13 ,image_size=64 ,num_channels=3 ,num_encoder_blocks=4 ,depths=[2, 2, 2, 2] ,sr_ratios=[8, 4, 2, 1] ,hidden_sizes=[16, 32, 64, 128] ,downsampling_rates=[1, 4, 8, 16] ,num_attention_heads=[1, 2, 4, 8] ,is_training=True ,use_labels=True ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,initializer_range=0.02 ,num_labels=3 ,scope=None ,):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.sr_ratios = sr_ratios
self.depths = depths
self.hidden_sizes = hidden_sizes
self.downsampling_rates = downsampling_rates
self.num_attention_heads = num_attention_heads
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
def prepare_config_and_inputs ( self ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config ( self ):
return SegformerConfig(
image_size=self.image_size ,num_channels=self.num_channels ,num_encoder_blocks=self.num_encoder_blocks ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def create_and_check_model ( self ,config ,pixel_values ,labels ):
model = SegformerModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def create_and_check_for_image_segmentation ( self ,config ,pixel_values ,labels ):
config.num_labels = self.num_labels
model = SegformerForSemanticSegmentation(config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
result = model(pixel_values ,labels=labels )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss ,0.0 )
def create_and_check_for_binary_image_segmentation ( self ,config ,pixel_values ,labels ):
config.num_labels = 1
model = SegformerForSemanticSegmentation(config=config )
model.to(torch_device )
model.eval()
labels = torch.randint(0 ,1 ,(self.batch_size, self.image_size, self.image_size) ).to(torch_device )
result = model(pixel_values ,labels=labels )
self.parent.assertGreater(result.loss ,0.0 )
def prepare_config_and_inputs_for_common ( self ):
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SegformerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
fx_compatible = True
test_head_masking = False
test_pruning = False
test_resize_embeddings = False
def setUp ( self ):
self.model_tester = SegformerModelTester(self )
self.config_tester = SegformerConfigTester(self ,config_class=SegformerConfig )
def test_config ( self ):
self.config_tester.run_common_tests()
def test_model ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_binary_image_segmentation ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs )
def test_for_image_segmentation ( self ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def test_inputs_embeds ( self ):
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def test_model_common_attributes ( self ):
pass
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(A )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,A )
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase = outputs.attentions
UpperCAmelCase = sum(self.model_tester.depths )
self.assertEqual(len(A ) ,A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(A ) ,A )
# verify the first attentions (first block, first layer)
UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
# verify the last attentions (last block, last layer)
UpperCAmelCase = (self.model_tester.image_size // 32) ** 2
UpperCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) ,[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] ,)
UpperCAmelCase = len(A )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
self.assertEqual(out_len + 1 ,len(A ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(A ) ,A )
# verify the first attentions (first block, first layer)
UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
def _UpperCamelCase ( self ):
def check_hidden_states_output(A ,A ,A ):
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(A ) ,A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) ,[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] ,)
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(A ,A ,A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(A ,A ,A )
def _UpperCamelCase ( self ):
if not self.model_tester.is_training:
return
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(MODEL_MAPPING ):
continue
UpperCAmelCase = model_class(A )
model.to(A )
model.train()
UpperCAmelCase = self._prepare_for_class(A ,A ,return_labels=A )
UpperCAmelCase = model(**A ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _UpperCamelCase ( self ):
pass
@slow
def _UpperCamelCase ( self ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = SegformerModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img ():
"""simple docstring"""
image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class SegformerModelIntegrationTest ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ):
# only resize + normalize
UpperCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=A ,align=A ,do_random_crop=A )
UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
A )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=A ,return_tensors="""pt""" )
UpperCAmelCase = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
UpperCAmelCase = model(A )
UpperCAmelCase = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,A )
UpperCAmelCase = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,A ,atol=1e-4 ) )
@slow
def _UpperCamelCase ( self ):
# only resize + normalize
UpperCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=A ,align=A ,do_random_crop=A )
UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(A )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=A ,return_tensors="""pt""" )
UpperCAmelCase = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
UpperCAmelCase = model(A )
UpperCAmelCase = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,A )
UpperCAmelCase = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,A ,atol=1e-1 ) )
@slow
def _UpperCamelCase ( self ):
# only resize + normalize
UpperCAmelCase = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=A ,align=A ,do_random_crop=A )
UpperCAmelCase = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
A )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=A ,return_tensors="""pt""" )
UpperCAmelCase = encoded_inputs.pixel_values.to(A )
with torch.no_grad():
UpperCAmelCase = model(A )
UpperCAmelCase = outputs.logits.detach().cpu()
UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=A ,target_sizes=[(500, 300)] )
UpperCAmelCase = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,A )
UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=A )
UpperCAmelCase = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape ,A )
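# A condensed, standalone version of the inference flow the integration tests
# above exercise (the checkpoint is the one used in the tests; the image path
# is an illustrative assumption).
import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

image_processor = SegformerImageProcessor()
seg_model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
img = Image.open("scene.jpg").convert("RGB")
enc = image_processor(images=img, return_tensors="pt")
with torch.no_grad():
    out = seg_model(**enc)
# Logits come out at 1/4 of the processed resolution; post-processing upsamples
# them back to the original image size and takes the per-pixel argmax.
seg_map = image_processor.post_process_semantic_segmentation(out, target_sizes=[img.size[::-1]])[0]
print(seg_map.shape)  # (height, width) tensor of class ids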
| 341 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester :
"""simple docstring"""
config_cls = PegasusConfig
config_updates = {}
hidden_act = """gelu"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs_for_common( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
def check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = TFPegasusModel(config=config).get_decoder()
input_ids = inputs_dict["input_ids"]
input_ids = input_ids[:1, :]
attention_mask = inputs_dict["attention_mask"][:1, :]
head_mask = inputs_dict["head_mask"]
self.batch_size = 1
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
# append to next input_ids and
next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
output_from_past_slice = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
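# Editor's note: a minimal sketch of the cache-consistency property checked above, assuming a
# generic TF seq2seq decoder (the names `ids`, `new_tokens` and `past` are illustrative, not
# taken from this file):
#
#   full = decoder(tf.concat([ids, new_tokens], axis=-1)).last_hidden_state[:, -3:]
#   incremental = decoder(new_tokens, past_key_values=past).last_hidden_state
#   tf.debugging.assert_near(full, incremental, rtol=1e-3)
#
# i.e. feeding only the new tokens plus the cached keys/values must reproduce the outputs that
# a full forward pass produces for those same positions.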
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ) -> Optional[Any]:
if attention_mask is None:
attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
if decoder_attention_mask is None:
decoder_attention_mask = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
], axis=-1, )
if head_mask is None:
head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
pipeline_model_mapping = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
is_encoder_decoder = True
test_pruning = False
test_onnx = False
def setUp(self):
self.model_tester = TFPegasusModelTester(self)
self.config_tester = ConfigTester(self, config_class=PegasusConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
"""simple docstring"""
src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
expected_text = [
"""California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
snake_case ="""google/pegasus-xsum"""
@cached_property
def tokenizer(self):
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def model(self):
model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
return model
def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
generated_words = self.translate_src_text(**tokenizer_kwargs)
assert self.expected_text == generated_words
def translate_src_text(self, **tokenizer_kwargs):
model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
generated_ids = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
return generated_words
@slow
def test_batch_generation(self):
self._assert_generated_batch_equal_expected()
| 711 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
"""simple docstring"""
def setUp(self):
self.checkpoint = "ylacombe/bark-small"
self.tmpdirname = tempfile.mkdtemp()
self.voice_preset = "en_speaker_1"
self.input_string = "This is a test string"
self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
self.speaker_embeddings_directory = "speaker_embeddings"
def get_tokenizer(self, **kwargs):
return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
processor = BarkProcessor(tokenizer=tokenizer)
processor.save_pretrained(self.tmpdirname)
processor = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
@slow
def test_save_load_pretrained_additional_features(self):
processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
processor.save_pretrained(
self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
processor = BarkProcessor.from_pretrained(
self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)", )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
def test_speaker_embeddings(self):
processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
seq_len = 35
nb_codebooks_coarse = 2
nb_codebooks_total = 8
voice_preset = {
"semantic_prompt": np.ones(seq_len),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
"fine_prompt": np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
inputs = processor(text=self.input_string, voice_preset=voice_preset)
processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
# test loading voice preset from npz file
tmpfilename = os.path.join(self.tmpdirname, "file.npz")
np.savez(tmpfilename, **voice_preset)
inputs = processor(text=self.input_string, voice_preset=tmpfilename)
processed_voice_preset = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
# test loading voice preset from the hub
inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
def test_tokenizer(self):
tokenizer = self.get_tokenizer()
processor = BarkProcessor(tokenizer=tokenizer)
encoded_processor = processor(text=self.input_string)
encoded_tok = tokenizer(
self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 592 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
# fmt: off
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
image_processor_map = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(image_processor_map, fp)
def get_tokenizer(self, **kwargs):
return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_rust_tokenizer(self, **kwargs):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
def get_image_processor(self, **kwargs):
return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def test_save_load_pretrained_default(self):
tokenizer_slow = self.get_tokenizer()
tokenizer_fast = self.get_rust_tokenizer()
image_processor = self.get_image_processor()
processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
processor_slow.save_pretrained(self.tmpdirname)
processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)
processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
processor_fast.save_pretrained(self.tmpdirname)
processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
def test_save_load_pretrained_additional_features(self):
processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
processor = CLIPSegProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, ViTImageProcessor)
def test_image_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input, return_tensors="np")
input_processor = processor(images=image_input, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_visual_prompt(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
image_input = self.prepare_image_inputs()
visual_prompt_input = self.prepare_image_inputs()
inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_tokenizer_decode(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
| 633 | '''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
"""simple docstring"""
def __init__(self, generator, features=None, cache_dir=None, keep_in_memory=False, streaming=False, gen_kwargs=None, num_proc=None, **kwargs, ):
'''simple docstring'''
super().__init__(
features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
self.builder = Generator(
cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, )
def read(self):
'''simple docstring'''
# Build iterable dataset
if self.streaming:
dataset = self.builder.as_streaming_dataset(split="train")
# Build regular (map-style) dataset
else:
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
return dataset | 523 | 0 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTestCase(unittest.TestCase):
def test_set_level(self):
"""simple docstring"""
logger = logging.get_logger()
# the current default level is logging.WARNING
level_origin = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())
# restore to the original level
logging.set_verbosity(level_origin)
def test_integration(self):
"""simple docstring"""
level_origin = logging.get_verbosity()
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
msg = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, msg + "\n")
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, "")
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(logger) as cl:
logger.warning(msg)
self.assertEqual(cl.out, msg + "\n")
# restore to the original level
logging.set_verbosity(level_origin)
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
SCREAMING_SNAKE_CASE_ : Optional[int] = logging.get_logger("transformers.models.bart.tokenization_bart" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.getenv("TRANSFORMERS_VERBOSITY",_A )
SCREAMING_SNAKE_CASE_ : Any = logging.log_levels[env_level_str]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_verbosity()
self.assertEqual(
_A,_A,F'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}',)
# restore to the original level
SCREAMING_SNAKE_CASE_ : List[str] = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def test_env_invalid_override(self):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
root_logger = logging.logging.getLogger()
with CaptureLogger(root_logger) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart")
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)
# no need to restore as nothing was changed
def test_advisory_warnings(self):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
msg = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
# nothing should be logged as env var disables this method
with CaptureLogger(logger) as cl:
logger.warning_advice(msg)
self.assertEqual(cl.out, "")
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(logger) as cl:
logger.warning_advice(msg)
self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 316 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316 | 1 |
"""simple docstring"""
def gcd(a: int, b: int) -> int:
while a != 0:
a, b = b % a, a
return b
def find_mod_inverse(a: int, m: int) -> int:
if gcd(a, m) != 1:
msg = f"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(msg)
u1, u2, u3 = 1, 0, a
v1, v2, v3 = 0, 1, m
while v3 != 0:
q = u3 // v3
v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
return u1 % m
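# Editor's sanity check (addition): since 7 * 15 = 105 = 4 * 26 + 1, the inverse of 7 modulo 26
# is 15, so the extended-Euclid routine above should satisfy:
#
#   assert gcd(7, 26) == 1
#   assert find_mod_inverse(7, 26) == 15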
| 103 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
if isinstance(image, torch.Tensor):
return image
elif isinstance(image, PIL.Image.Image):
image = [image]
if isinstance(image[0], PIL.Image.Image):
image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
image = np.concatenate(image, axis=0)
image = np.array(image).astype(np.float32) / 255.0
image = image.transpose(0, 3, 1, 2)
image = 2.0 * image - 1.0
image = torch.from_numpy(image)
elif isinstance(image[0], torch.Tensor):
image = torch.cat(image, dim=0)
return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
if not isinstance(v0, np.ndarray):
inputs_are_torch = True
input_device = v0.device
v0 = v0.cpu().numpy()
v1 = v1.cpu().numpy()
dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
if np.abs(dot) > DOT_THRESHOLD:
v2 = (1 - t) * v0 + t * v1
else:
theta_0 = np.arccos(dot)
sin_theta_0 = np.sin(theta_0)
theta_t = theta_0 * t
sin_theta_t = np.sin(theta_t)
s0 = np.sin(theta_0 - theta_t) / sin_theta_0
s1 = sin_theta_t / sin_theta_0
v2 = s0 * v0 + s1 * v1
if inputs_are_torch:
v2 = torch.from_numpy(v2).to(input_device)
return v2
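# Editor's note: the else-branch above implements the standard spherical interpolation formula
#     slerp(t; v0, v1) = sin((1 - t) * theta) / sin(theta) * v0 + sin(t * theta) / sin(theta) * v1
# with theta = arccos(<v0, v1> / (||v0|| * ||v1||)); when |cos(theta)| > DOT_THRESHOLD the vectors
# are nearly parallel and plain linear interpolation is used instead.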
def spherical_dist_loss(x, y):
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
for param in model.parameters():
param.requires_grad = value
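# Editor's note: for unit-normalized x and y, ||x - y|| / 2 = sin(theta / 2) where theta is the
# angle between them, so spherical_dist_loss evaluates to theta^2 / 2 -- half the squared
# geodesic (angular) distance between the two embeddings on the unit sphere.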
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
'''simple docstring'''
def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, clip_model: CLIPModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], feature_extractor: CLIPFeatureExtractor, coca_model=None, coca_tokenizer=None, coca_transform=None, ):
super().__init__()
self.register_modules(
vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model, coca_tokenizer=coca_tokenizer, coca_transform=coca_transform, )
self.feature_extractor_size = (
feature_extractor.size
if isinstance(feature_extractor.size, int)
else feature_extractor.size["shortest_edge"]
)
self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
set_requires_grad(self.text_encoder, False)
set_requires_grad(self.clip_model, False)
def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(slice_size)
def disable_attention_slicing(self):
self.enable_attention_slicing(None)
def freeze_vae(self):
set_requires_grad(self.vae, False)
def unfreeze_vae(self):
set_requires_grad(self.vae, True)
def freeze_unet(self):
set_requires_grad(self.unet, False)
def unfreeze_unet(self):
set_requires_grad(self.unet, True)
def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
timesteps = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
if not isinstance(image, torch.Tensor):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
image = image.to(device=device, dtype=dtype)
if isinstance(generator, list):
init_latents = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
]
init_latents = torch.cat(init_latents, dim=0)
else:
init_latents = self.vae.encode(image).latent_dist.sample(generator)
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
init_latents = 0.18215 * init_latents
init_latents = init_latents.repeat_interleave(batch_size, dim=0)
noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
# get latents
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
latents = init_latents
return latents
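# Editor's note: this is the usual image-to-image initialization -- the VAE latents of the
# input image, scaled by the hard-coded factor 0.18215, are noised to the timestep selected
# via `strength`, which is what scheduler.add_noise computes:
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * epsilon
# so denoising starts partway along the diffusion trajectory instead of from pure noise.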
def get_image_description(self, image):
transformed_image = self.coca_transform(image).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
def get_clip_image_embeddings(self, image, batch_size):
clip_image_input = self.feature_extractor.preprocess(image)
clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
return image_embeddings_clip
@torch.enable_grad()
def cond_fn(self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale, ):
latents = latents.detach().requires_grad_()
latent_model_input = self.scheduler.scale_model_input(latents, timestep)
# predict the noise residual
noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
beta_prod_t = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
fac = torch.sqrt(beta_prod_t)
sample = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, LMSDiscreteScheduler):
sigma = self.scheduler.sigmas[index]
sample = latents - sigma * noise_pred
else:
raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
sample = 1 / 0.18215 * sample
image = self.vae.decode(sample).sample
image = (image / 2 + 0.5).clamp(0, 1)
image = transforms.Resize(self.feature_extractor_size)(image)
image = self.normalize(image).to(latents.dtype)
image_embeddings_clip = self.clip_model.get_image_features(image)
image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
grads = -torch.autograd.grad(loss, latents)[0]
if isinstance(self.scheduler, LMSDiscreteScheduler):
latents = latents.detach() + grads * (sigma**2)
noise_pred = noise_pred_original
else:
noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(self, style_image: Union[torch.FloatTensor, PIL.Image.Image], content_image: Union[torch.FloatTensor, PIL.Image.Image], style_prompt: Optional[str] = None, content_prompt: Optional[str] = None, height: Optional[int] = 512, width: Optional[int] = 512, noise_strength: float = 0.6, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, batch_size: Optional[int] = 1, eta: float = 0.0, clip_guidance_scale: Optional[float] = 100, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, slerp_latent_style_strength: float = 0.8, slerp_prompt_style_strength: float = 0.1, slerp_clip_image_style_strength: float = 0.1, ):
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if isinstance(generator, torch.Generator) and batch_size > 1:
generator = [generator] + [None] * (batch_size - 1)
coca_is_none = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
coca_is_none = [x[0] for x in coca_is_none if x[1]]
coca_is_none_str = ", ".join(coca_is_none)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(coca_is_none):
raise ValueError(
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
content_prompt = self.get_image_description(content_image)
if style_prompt is None:
if len(coca_is_none):
raise ValueError(
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
style_prompt = self.get_image_description(style_image)
# get prompt text embeddings for content and style
content_text_input = self.tokenizer(
content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
style_text_input = self.tokenizer(
style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
# duplicate text embeddings for each generation per prompt
text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
# set timesteps
accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
extra_set_kwargs = {}
if accepts_offset:
extra_set_kwargs["offset"] = 1
self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device)
timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
latent_timestep = timesteps[:1].repeat(batch_size)
# Preprocess image
preprocessed_content_image = preprocess(content_image, width, height)
content_latents = self.prepare_latents(
preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)
preprocessed_style_image = preprocess(style_image, width, height)
style_latents = self.prepare_latents(
preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)
latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
if clip_guidance_scale > 0:
content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
clip_image_embeddings = slerp(
slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
max_length = content_text_input.input_ids.shape[-1]
uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
self.device)
else:
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
latents = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
with self.progress_bar(total=num_inference_steps):
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform classifier free guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
text_embeddings_for_guidance = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
noise_pred, latents = self.cond_fn(
latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale, )
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 144 | 0 |
"""simple docstring"""
def lowercase ( _snake_case : list[list] ) ->list[list]:
"""simple docstring"""
__snake_case : Dict = current_set.copy()
for row_index, row in enumerate(_snake_case ):
__snake_case : str = row[0]
for column_index, column in enumerate(_snake_case ):
if magnitude == 0:
__snake_case : str = column
continue
__snake_case : str = column / magnitude
# Subtract to cancel term
__snake_case : Union[str, Any] = current_set[0]
__snake_case : int = [first_row]
__snake_case : Tuple = current_set[1::]
for row in current_set:
__snake_case : Optional[int] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(_snake_case )
continue
for column_index in range(len(_snake_case ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(_snake_case )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
__snake_case : Tuple = final_set[0]
__snake_case : Tuple = []
__snake_case : List[str] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
__snake_case : str = simplify(_snake_case )
for i in range(len(_snake_case ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , _snake_case )
__snake_case : List[Any] = resultant
return final_set
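# Editor's note: each call to simplify() normalizes rows by their leading coefficient and
# subtracts the first row to zero out the first column, then recurses on the remaining
# (n - 1) x n subsystem -- effectively one step of Gauss-Jordan elimination per recursion
# level, stopping once rows shrink to length 3 (two coefficients plus the constant term).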
def lowercase ( _snake_case : list[list] ) ->list:
"""simple docstring"""
if len(_snake_case ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
__snake_case : Optional[int] = len(_snake_case ) + 1
if any(len(_snake_case ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(_snake_case , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(_snake_case ) == 1:
return [equations[0][-1] / equations[0][0]]
__snake_case : int = equations.copy()
if any(0 in row for row in data_set ):
__snake_case : List[Any] = data_set.copy()
__snake_case : int = []
for row_index, row in enumerate(_snake_case ):
if 0 not in row:
__snake_case : Tuple = data_set.pop(_snake_case )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , _snake_case )
__snake_case : Tuple = data_set.copy()
__snake_case : Dict = simplify(_snake_case )
__snake_case : Optional[int] = simplified[::-1]
__snake_case : list = []
for row in simplified:
__snake_case : Union[str, Any] = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
__snake_case : Any = row.copy()[: len(_snake_case ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(_snake_case ) == 0:
solutions.append(0 )
continue
__snake_case : Tuple = temp_row[1::]
__snake_case : List[str] = temp_row[::-1]
for column_index, column in enumerate(_snake_case ):
current_solution -= column * solutions[column_index]
solutions.append(_snake_case )
__snake_case : Optional[Any] = []
for item in solutions:
final.append(float(round(_snake_case , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 229 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
"""simple docstring"""
print("Generating primitive root of p")
while True:
g = random.randrange(3, p_val)
if pow(g, 2, p_val) == 1:
continue
if pow(g, p_val, p_val) == 1:
continue
return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
"""simple docstring"""
print("Generating prime p...")
p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
e_1 = primitive_root(p)  # one primitive root on modulo p.
d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
public_key = (key_size, e_1, e_2, p)
private_key = (key_size, d)
return public_key, private_key
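# Editor's note: in this script's layout the public key is (key_size, g, (g^d)^(-1) mod p, p),
# since e_1 is a primitive root g and e_2 is the modular inverse of g^d mod p, while the
# private key keeps only the exponent d. Security rests on the discrete logarithm problem:
# recovering d from g and g^d mod p.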
def lowercase ( _snake_case : str , _snake_case : int ) ->None:
"""simple docstring"""
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print('''\nWARNING:''' )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
__snake_case , __snake_case : Optional[int] = generate_key(_snake_case )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" , '''w''' ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" , '''w''' ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def lowercase ( ) ->None:
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 229 | 1 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
"""simple docstring"""
def __init__(self, length: int = 101):
"""simple docstring"""
self.length = length
def __len__(self):
"""simple docstring"""
return self.length
def __getitem__(self, i):
"""simple docstring"""
return i
class DummyDataCollator:
"""simple docstring"""
def __call__(self, features):
"""simple docstring"""
return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
"""simple docstring"""
def __init__(self):
"""simple docstring"""
super().__init__()
# Add some (unused) params otherwise DDP will complain.
self.fc = nn.Linear(120, 80)
def forward(self, input_ids, labels=None):
"""simple docstring"""
if labels is not None:
return torch.tensor(0.0, device=input_ids.device), input_ids
else:
return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
"""simple docstring"""
@require_torch_neuroncore
def test_trainer(self):
"""simple docstring"""
distributed_args = f"--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py".split()
output_dir = self.get_auto_remove_tmp_dir()
args = f"--output_dir {output_dir}".split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
"""simple docstring"""
@require_torch_multi_gpu
def test_trainer(self):
"""simple docstring"""
distributed_args = f"--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py".split()
output_dir = self.get_auto_remove_tmp_dir()
args = f"--output_dir {output_dir}".split()
cmd = ["torchrun"] + distributed_args + args
execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
parser = HfArgumentParser((TrainingArguments,))
training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
dataset = DummyDataset(dataset_length)
def A ( __UpperCamelCase ) -> Dict:
A__ = list(range(len(lowercase__ ) ) )
A__ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''' )
return {"success": success}
trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = 2
metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
trainer.args.eval_accumulation_steps = None
| 9 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
dataset_name: Optional[str] = field(
default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."})
dataset_config_name: Optional[str] = field(
default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}, )
max_seq_length: int = field(
default=1024, metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
}, )
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
pad_to_max_length: bool = field(
default=False, metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
}, )
max_train_samples: Optional[int] = field(
default=None, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
}, )
max_eval_samples: Optional[int] = field(
default=None, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
}, )
max_predict_samples: Optional[int] = field(
default=None, metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
}, )
train_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the training data."})
validation_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the validation data."})
test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
def __post_init__(self):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
else:
train_extension = self.train_file.split(".")[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
validation_extension = self.validation_file.split(".")[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowerCAmelCase_ :
UpperCamelCase_ :str = field(
default=snake_case__ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
UpperCamelCase_ :Optional[str] = field(
default=snake_case__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
UpperCamelCase_ :bool = field(
default=snake_case__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
UpperCamelCase_ :str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
UpperCamelCase_ :bool = field(
default=snake_case__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def main():
'''simple docstring'''
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
datasets.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCAmelCase__ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCAmelCase__ = data_args.train_file.split('''.''' )[-1]
lowerCAmelCase__ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCAmelCase__ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCAmelCase__ = load_dataset('''csv''' , data_files=lowercase__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCAmelCase__ = load_dataset('''json''' , data_files=lowercase__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCAmelCase__ = raw_datasets['''train'''].features['''label'''].names
lowerCAmelCase__ = len(lowercase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowerCAmelCase__ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase__ , )
lowerCAmelCase__ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase__ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase__ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCAmelCase__ = {'''Refused''': 0, '''Entailed''': 1}
lowerCAmelCase__ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
lowerCAmelCase__ = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(examples ):
# Tokenize the texts
def _convert_table_text_to_pandas(_table_text ):
lowerCAmelCase__ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCAmelCase__ = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
lowerCAmelCase__ = examples['''statement''']
lowerCAmelCase__ = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
lowerCAmelCase__ = tokenizer(lowercase__ , lowercase__ , padding=lowercase__ , max_length=lowercase__ , truncation=lowercase__ )
lowerCAmelCase__ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCAmelCase__ = raw_datasets.map(
lowercase__ , batched=lowercase__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCAmelCase__ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCAmelCase__ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCAmelCase__ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCAmelCase__ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCAmelCase__ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCAmelCase__ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase__ ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p : EvalPrediction ):
preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
preds = np.argmax(preds , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase__ = default_data_collator
elif training_args.fpaa:
lowerCAmelCase__ = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 )
else:
lowerCAmelCase__ = None
# Initialize our Trainer
lowerCAmelCase__ = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase__ , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
lowerCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=lowercase__ )
lowerCAmelCase__ = train_result.metrics
lowerCAmelCase__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
)
lowerCAmelCase__ = min(lowercase__ , len(lowercase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , lowercase__ )
trainer.save_metrics('''train''' , lowercase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase__ = trainer.evaluate(eval_dataset=lowercase__ )
lowerCAmelCase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
lowerCAmelCase__ = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics('''eval''' , lowercase__ )
trainer.save_metrics('''eval''' , lowercase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCAmelCase__ = predict_dataset.remove_columns('''label''' )
lowerCAmelCase__ = trainer.predict(lowercase__ , metric_key_prefix='''predict''' ).predictions
lowerCAmelCase__ = np.argmax(lowercase__ , axis=1 )
lowerCAmelCase__ = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowercase__ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowercase__ ):
lowerCAmelCase__ = label_list[item]
writer.write(f'{index}\t{item}\n' )
lowerCAmelCase__ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def _mp_fn(index ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
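# Editor's sketch (not part of the original script): a minimal, self-contained illustration of
# the `#`-delimited TabFact table format parsed by _convert_table_text_to_pandas above. The
# helper name and the sample string are hypothetical; `pd` is the pandas import from the top of
# this script.
def _demo_table_text_to_pandas():
    table_text = "name#age\nalice#30\nbob#25"
    # The first row holds the column names; the remaining rows hold the cells.
    table_content = [row.split("#") for row in table_text.strip("\n").split("\n")]
    table = pd.DataFrame.from_records(table_content[1:], columns=table_content[0])
    assert list(table.columns) == ["name", "age"] and len(table) == 2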
| 668 | 0 |
__lowerCamelCase = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__lowerCamelCase = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt ( message ) -> str:
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def decrypt ( message ) -> str:
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def main ( ) -> None:
'''simple docstring'''
message : str = "Morse code here!"
print(message )
message = encrypt(message )
print(message )
message = decrypt(message )
print(message )
if __name__ == "__main__":
main()
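# Editor's sketch: a round-trip check for encrypt/decrypt above, relying only on the
# MORSE_CODE_DICT and REVERSE_DICT tables defined in this snippet.
def _demo_roundtrip():
    encoded = encrypt("SOS")
    assert encoded == "... --- ..."
    assert decrypt(encoded) == "SOS"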
| 455 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class snake_case_ (lowercase__ ):
"""simple docstring"""
_lowerCamelCase = """lxmert"""
_lowerCamelCase = {}
def __init__( self ,vocab_size=30522 ,hidden_size=768 ,num_attention_heads=12 ,num_qa_labels=9500 ,num_object_labels=1600 ,num_attr_labels=400 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,l_layers=9 ,x_layers=5 ,r_layers=5 ,visual_feat_dim=2048 ,visual_pos_dim=4 ,visual_loss_normalizer=6.67 ,task_matched=True ,task_mask_lm=True ,task_obj_predict=True ,task_qa=True ,visual_obj_loss=True ,visual_attr_loss=True ,visual_feat_loss=True ,**kwargs ,):
"""simple docstring"""
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_size
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : Any = num_qa_labels
UpperCAmelCase_ : str = num_object_labels
UpperCAmelCase_ : Dict = num_attr_labels
UpperCAmelCase_ : Tuple = l_layers
UpperCAmelCase_ : Tuple = x_layers
UpperCAmelCase_ : int = r_layers
UpperCAmelCase_ : Optional[Any] = visual_feat_dim
UpperCAmelCase_ : List[Any] = visual_pos_dim
UpperCAmelCase_ : int = visual_loss_normalizer
UpperCAmelCase_ : str = task_matched
UpperCAmelCase_ : str = task_mask_lm
UpperCAmelCase_ : int = task_obj_predict
UpperCAmelCase_ : List[str] = task_qa
UpperCAmelCase_ : Optional[int] = visual_obj_loss
UpperCAmelCase_ : List[str] = visual_attr_loss
UpperCAmelCase_ : str = visual_feat_loss
UpperCAmelCase_ : List[Any] = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**kwargs)
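# Editor's note: unlike most configs, LXMERT has no single layer count; with the defaults
# above, the dict assigned on the line before super().__init__ is
# {"vision": 5, "cross_encoder": 5, "language": 9} (r_layers, x_layers and l_layers).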
| 455 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] ={
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class _A ( lowerCAmelCase ):
snake_case__ : Optional[int] = 'mra'
def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=1 , initializer_range=0.0_2 , layer_norm_eps=1E-5 , position_embedding_type="absolute" , block_per_row=4 , approx_mode="full" , initial_prior_first_n_blocks=0 , initial_prior_diagonal_n_blocks=0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = type_vocab_size
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = block_per_row
lowercase = approx_mode
lowercase = initial_prior_first_n_blocks
lowercase = initial_prior_diagonal_n_blocks
| 359 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
__lowerCAmelCase : Dict ={
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class _A ( lowerCAmelCase ):
snake_case__ : Optional[int] = 'deta'
snake_case__ : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , backbone_config=None , num_queries=900 , max_position_embeddings=2048 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=300 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.2_5 , **kwargs , ):
"""simple docstring"""
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowercase = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(backbone_config , dict ):
lowercase = backbone_config.pop("""model_type""" )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(backbone_config )
lowercase = backbone_config
lowercase = num_queries
lowercase = max_position_embeddings
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = init_xavier_std
lowercase = encoder_layerdrop
lowercase = auxiliary_loss
lowercase = position_embedding_type
# deformable attributes
lowercase = num_feature_levels
lowercase = encoder_n_points
lowercase = decoder_n_points
lowercase = two_stage
lowercase = two_stage_num_proposals
lowercase = with_box_refine
lowercase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
lowercase = class_cost
lowercase = bbox_cost
lowercase = giou_cost
# Loss coefficients
lowercase = mask_loss_coefficient
lowercase = dice_loss_coefficient
lowercase = bbox_loss_coefficient
lowercase = giou_loss_coefficient
lowercase = eos_coefficient
lowercase = focal_alpha
super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def A__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def A__ ( self ):
"""simple docstring"""
return self.d_model
def A__ ( self ):
"""simple docstring"""
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
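# Editor's sketch (hypothetical helper): the serialization contract of to_dict() above -- the
# nested backbone config is flattened to a plain dict and the model type is recorded, so the
# result round-trips through JSON.
def _demo_config_round_trip(config):
    output = config.to_dict()
    assert output["model_type"] == config.model_type
    assert isinstance(output["backbone_config"], dict)
    return output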
| 359 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
def forward( self , input_ids , token_type_ids , attention_mask ):
return None
class FuncNonContiguousArgs:
def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
return None
class OnnxExportTestCase ( unittest.TestCase):
MODEL_TO_TEST = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase , "tf" , 12 , **__UpperCamelCase )
@require_torch
@slow
def UpperCAmelCase__ ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCamelCase , "pt" , 12 , **__UpperCamelCase )
@require_torch
@slow
def UpperCAmelCase__ ( self : Any ):
from transformers import BertModel
_UpperCAmelCase = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(__UpperCamelCase ) )
vocab_file.flush()
_UpperCAmelCase = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_UpperCAmelCase = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) )
model.save_pretrained(__UpperCamelCase )
self._test_export(__UpperCamelCase , "pt" , 12 , __UpperCamelCase )
@require_tf
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_UpperCAmelCase = self._test_export(__UpperCamelCase , "tf" , 12 , **__UpperCamelCase )
_UpperCAmelCase = quantize(Path(__UpperCamelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_UpperCAmelCase = self._test_export(__UpperCamelCase , "pt" , 12 , **__UpperCamelCase )
_UpperCAmelCase = quantize(__UpperCamelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def _test_export( self , model , framework , opset , tokenizer=None , **model_kwargs ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
path = Path(tempdir ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(framework , model , path.as_posix() , opset , tokenizer , **model_kwargs )
return path
except Exception as e:
self.fail(e )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase__ ( self : List[Any] ):
from transformers import BertModel
_UpperCAmelCase = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
_UpperCAmelCase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__UpperCamelCase , __UpperCamelCase , "pt" )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase__ ( self : List[str] ):
from transformers import TFBertModel
_UpperCAmelCase = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
_UpperCAmelCase = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(__UpperCamelCase , __UpperCamelCase , "tf" )
def _test_infer_dynamic_axis( self , model , tokenizer , framework ):
nlp = FeatureExtractionPipeline(model , tokenizer )
variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
# Assert all variables are present
self.assertEqual(len(shapes ) , len(variable_names ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , input_vars )
self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = ["input_ids", "attention_mask", "token_type_ids"]
_UpperCAmelCase = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
_UpperCAmelCase , _UpperCAmelCase = ensure_valid_input(FuncContiguousArgs() , __UpperCamelCase , __UpperCamelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCamelCase ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCamelCase ) , set(__UpperCamelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCamelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
_UpperCAmelCase , _UpperCAmelCase = ensure_valid_input(FuncNonContiguousArgs() , __UpperCamelCase , __UpperCamelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCamelCase ) , 1 )
self.assertEqual(len(__UpperCamelCase ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 129 |
import re
from filelock import FileLock
try:
import nltk
__lowerCAmelCase = True
except (ImportError, ModuleNotFoundError):
__lowerCAmelCase = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
re.sub("<n>" , "" , _lowerCAmelCase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_lowerCAmelCase ) )
| 129 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase( __snake_case ):
'''simple docstring'''
__magic_name__ = (CMStochasticIterativeScheduler,)
__magic_name__ = 10
def lowerCAmelCase__ ( self , **snake_case_ ):
_A = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
config.update(**snake_case_ )
return config
def lowerCAmelCase__ ( self ):
_A = 10
_A = self.get_scheduler_config()
_A = self.scheduler_classes[0](**snake_case_ )
scheduler.set_timesteps(snake_case_ )
_A = scheduler.timesteps[0]
_A = scheduler.timesteps[1]
_A = self.dummy_sample
_A = 0.1 * sample
_A = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
_A = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase__ ( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def lowerCAmelCase__ ( self ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**snake_case_ )
_A = 1
scheduler.set_timesteps(snake_case_ )
_A = scheduler.timesteps
_A = torch.manual_seed(0 )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(snake_case_ ):
# 1. scale model input
_A = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
_A = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
_A = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
_A = pred_prev_sample
_A = torch.sum(torch.abs(snake_case_ ) )
_A = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1E-2
assert abs(result_mean.item() - 0.2510 ) < 1E-3
def lowerCAmelCase__ ( self ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**snake_case_ )
_A = [106, 0]
scheduler.set_timesteps(timesteps=snake_case_ )
_A = scheduler.timesteps
_A = torch.manual_seed(0 )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_A = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
_A = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
_A = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
_A = pred_prev_sample
_A = torch.sum(torch.abs(snake_case_ ) )
_A = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.4527 ) < 1E-3
def lowerCAmelCase__ ( self ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**snake_case_ )
_A = [39, 30, 12, 15, 0]
with self.assertRaises(snake_case_ , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**snake_case_ )
_A = [39, 30, 12, 1, 0]
_A = len(snake_case_ )
with self.assertRaises(snake_case_ , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=snake_case_ , timesteps=snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**snake_case_ )
_A = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case_ , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}.' , ):
scheduler.set_timesteps(timesteps=snake_case_ )
| 27 |
'''simple docstring'''
from math import factorial
__A : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum ( A__ : int ):
'''simple docstring'''
if not isinstance(A__ , A__ ):
raise TypeError("""Parameter number must be int""" )
if number < 0:
raise ValueError("""Parameter number must be greater than or equal to 0""" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(A__ ) )
def solution ( chain_length : int = 60 , number_limit : int = 1_00_00_00 ):
'''simple docstring'''
if not isinstance(A__ , A__ ) or not isinstance(A__ , A__ ):
raise TypeError("""Parameters chain_length and number_limit must be int""" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"""Parameters chain_length and number_limit must be greater than 0""" )
# the counter for the chains with the exact desired length
lowerCAmelCase_ : Optional[int] = 0
# the cached sizes of the previous chains
lowerCAmelCase_ : dict[int, int] = {}
for start_chain_element in range(1 , number_limit ):
# The temporary set will contain the elements of the chain
lowerCAmelCase_ : List[str] = set()
lowerCAmelCase_ : Optional[int] = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
lowerCAmelCase_ : int = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(chain_element )
chain_set_length += 1
chain_element = digit_factorial_sum(chain_element )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowerCAmelCase_ : int = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution()}''')
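# Editor's sketch: sanity checks for digit_factorial_sum above. 145 is a fixed point of the
# digit-factorial map (1! + 4! + 5! = 1 + 24 + 120 = 145), and 169 maps to
# 363601 (1! + 6! + 9! = 1 + 720 + 362880).
def _demo_digit_factorial_sum():
    assert digit_factorial_sum(145) == 145
    assert digit_factorial_sum(169) == 363601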
| 275 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a__ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key( state_dict , old , new ):
'''simple docstring'''
val = state_dict.pop(old )
state_dict[new] = val
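# Editor's sketch: rename_key mutates the checkpoint state dict in place.
def _demo_rename_key():
    toy_state = {"old.name": 1}
    rename_key(toy_state, "old.name", "new.name")
    assert toy_state == {"new.name": 1}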
def rename_backbone_keys( state_dict ):
'''simple docstring'''
new_state_dict = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
new_state_dict[new_key] = value
else:
new_state_dict[key] = value
return new_state_dict
def read_in_q_k_v( state_dict ):
'''simple docstring'''
snake_case__ = """"""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
snake_case__ = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
snake_case__ = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[:256, :]
snake_case__ = in_proj_bias[:256]
snake_case__ = in_proj_weight[256:512, :]
snake_case__ = in_proj_bias[256:512]
snake_case__ = in_proj_weight[-256:, :]
snake_case__ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
snake_case__ = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
snake_case__ = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case__ = in_proj_weight[:256, :]
snake_case__ = in_proj_bias[:256]
snake_case__ = in_proj_weight[256:512, :]
snake_case__ = in_proj_bias[256:512]
snake_case__ = in_proj_weight[-256:, :]
snake_case__ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
snake_case__ = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
snake_case__ = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
snake_case__ = in_proj_weight_cross_attn[:256, :]
snake_case__ = in_proj_bias_cross_attn[:256]
snake_case__ = in_proj_weight_cross_attn[256:512, :]
snake_case__ = in_proj_bias_cross_attn[256:512]
snake_case__ = in_proj_weight_cross_attn[-256:, :]
snake_case__ = in_proj_bias_cross_attn[-256:]
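# Editor's sketch (hypothetical helper): PyTorch's MultiheadAttention stores the query/key/value
# projections stacked in a single in_proj matrix; with d_model = 256, rows [:256], [256:512]
# and [-256:] recover the separate q, k and v weights, which is the slicing used above.
def _demo_split_in_proj(in_proj_weight):
    q = in_proj_weight[:256, :]
    k = in_proj_weight[256:512, :]
    v = in_proj_weight[-256:, :]
    return q, k, v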
def resize( image , checkpoint_url ):
'''simple docstring'''
width , height = image.size
current_max_size = max(width , height )
target_max_size = 800 if """detection""" in checkpoint_url else 1000
scale = target_max_size / current_max_size
resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
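# Editor's sketch (hypothetical helper and sizes) of the scaling rule in resize above: a
# 1600x1200 image with a 1000-pixel target is scaled by 1000/1600, giving 1000x750.
def _demo_resize_scale(width, height, target_max_size):
    scale = target_max_size / max(width, height)
    return int(round(scale * width)), int(round(scale * height))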
def normalize( image ):
'''simple docstring'''
image = F.to_tensor(image )
image = F.normalize(image , mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] )
return image
@torch.no_grad()
def convert_table_transformer_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
'''simple docstring'''
logger.info("""Converting model...""" )
# load original state dict
snake_case__ = torch.hub.load_state_dict_from_url(a , map_location="""cpu""" )
# rename keys
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
snake_case__ = rename_backbone_keys(a )
# query, key and value matrices need special treatment
read_in_q_k_v(a )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
snake_case__ = """model."""
for key in state_dict.copy().keys():
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
snake_case__ = state_dict.pop(a )
snake_case__ = val
# create HuggingFace model and load state dict
snake_case__ = TableTransformerConfig(
backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
snake_case__ = 15
snake_case__ = 2
snake_case__ = {0: """table""", 1: """table rotated"""}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
else:
snake_case__ = 125
snake_case__ = 6
snake_case__ = {
0: """table""",
1: """table column""",
2: """table row""",
3: """table column header""",
4: """table projected row header""",
5: """table spanning cell""",
}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = DetrImageProcessor(
format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 )
snake_case__ = TableTransformerForObjectDetection(a )
model.load_state_dict(a )
model.eval()
# verify our conversion
snake_case__ = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png"""
snake_case__ = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , filename=a )
snake_case__ = Image.open(a ).convert("""RGB""" )
snake_case__ = normalize(resize(a , a ) ).unsqueeze(0 )
snake_case__ = model(a )
if "detection" in checkpoint_url:
snake_case__ = (1, 15, 3)
snake_case__ = torch.tensor(
[[-6.78_97, -16.99_85, 6.79_37], [-8.01_86, -22.21_92, 6.96_77], [-7.31_17, -21.07_08, 7.40_55]] )
snake_case__ = torch.tensor([[0.48_67, 0.17_67, 0.67_32], [0.67_18, 0.44_79, 0.38_30], [0.47_16, 0.17_60, 0.63_64]] )
else:
snake_case__ = (1, 125, 7)
snake_case__ = torch.tensor(
[[-18.14_30, -8.32_14, 4.82_74], [-18.46_85, -7.13_61, -4.26_67], [-26.36_93, -9.34_29, -4.99_62]] )
snake_case__ = torch.tensor([[0.49_83, 0.55_95, 0.94_40], [0.49_16, 0.63_15, 0.59_54], [0.61_08, 0.86_37, 0.11_35]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(a ).mkdir(exist_ok=a )
model.save_pretrained(a )
image_processor.save_pretrained(a )
if push_to_hub:
# Push model to HF hub
logger.info("""Pushing model to the hub...""" )
snake_case__ = (
"""microsoft/table-transformer-detection"""
if """detection""" in checkpoint_url
else """microsoft/table-transformer-structure-recognition"""
)
model.push_to_hub(a )
image_processor.push_to_hub(a )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
 | 566 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
a__ = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
a__ = '''sshleifer/student_marian_en_ro_6_1'''
a__ = '''sshleifer/tiny-mbart'''
@require_torch
class __magic_name__( __lowerCAmelCase ):
def run_seqaseq_quick( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ):
'''simple docstring'''
snake_case__ = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=__UpperCamelCase , num_train_epochs=1 , distributed=__UpperCamelCase , extra_args_str=__UpperCamelCase , predict_with_generate=__UpperCamelCase , do_train=__UpperCamelCase , do_eval=__UpperCamelCase , do_predict=__UpperCamelCase , )
snake_case__ = TrainerState.load_from_json(os.path.join(__UpperCamelCase , """trainer_state.json""" ) ).log_history
if not do_eval:
return
snake_case__ = [log for log in logs if """eval_loss""" in log.keys()]
snake_case__ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
snake_case__ = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , __UpperCamelCase )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase )
@require_torch_multi_gpu
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : List[str] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : Union[str, Any] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__UpperCamelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
self.run_seqaseq_quick(
distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__UpperCamelCase )
@require_apex
@require_torch_gpu
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def __lowerCAmelCase( self : Optional[Any] , __UpperCamelCase : str ):
'''simple docstring'''
snake_case__ = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
snake_case__ = experiments[experiment_id]
snake_case__ = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
snake_case__ = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__UpperCamelCase , extra_args_str=data["""extra_args_str"""] )
snake_case__ = len(re.findall(__UpperCamelCase , cl.err ) )
self.assertEqual(__UpperCamelCase , data["""n_matches"""] )
@slow
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
snake_case__ = self.run_trainer(
eval_steps=2 , max_len=1_2_8 , model_name=__UpperCamelCase , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=__UpperCamelCase , )
# Check metrics
snake_case__ = TrainerState.load_from_json(os.path.join(__UpperCamelCase , """trainer_state.json""" ) ).log_history
snake_case__ = [log for log in logs if """eval_loss""" in log.keys()]
snake_case__ = eval_metrics[0]
snake_case__ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __UpperCamelCase )
# test if do_predict saves generations and metrics
snake_case__ = os.listdir(__UpperCamelCase )
snake_case__ = {os.path.basename(__UpperCamelCase ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __lowerCAmelCase( self : Union[str, Any] ):
'''simple docstring'''
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__UpperCamelCase : str ) -> Tuple[int, float]:
snake_case__ = """--skip_memory_metrics 0"""
snake_case__ = self.run_trainer(
max_len=1_2_8 , model_name=__UpperCamelCase , learning_rate=3E-4 , num_train_epochs=1 , optim=__UpperCamelCase , distributed=__UpperCamelCase , extra_args_str=__UpperCamelCase , do_eval=__UpperCamelCase , do_predict=__UpperCamelCase , n_gpus_to_use=1 , )
# Check metrics
snake_case__ = TrainerState.load_from_json(Path(__UpperCamelCase , """trainer_state.json""" ) ).log_history
snake_case__ = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**2_0 )
snake_case__ = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**2_0 )
snake_case__ = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case__ , snake_case__ , snake_case__ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case__ , snake_case__ , snake_case__ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case__ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case__ = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case__ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case__ = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
snake_case__ = 1_2_0
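# Editor's note: the arithmetic in the comment above works out to 25M quantized params
# * (8 - 2) bytes/param = 150MB of expected optimizer-state savings, so the 120MB
# threshold leaves roughly a 30MB margin for per-GPU variation.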
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__UpperCamelCase , __UpperCamelCase , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
__UpperCamelCase , __UpperCamelCase , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
__UpperCamelCase , __UpperCamelCase , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def __lowerCAmelCase( self : str , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : float = 3E-3 , __UpperCamelCase : str = "adafactor" , __UpperCamelCase : bool = False , __UpperCamelCase : str = None , __UpperCamelCase : int = 0 , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : int = None , ):
'''simple docstring'''
snake_case__ = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__UpperCamelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__UpperCamelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
snake_case__ = f"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__UpperCamelCase )}
""".split()
snake_case__ = """
--do_predict
""".split()
snake_case__ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
snake_case__ = get_gpu_count()
snake_case__ = get_torch_dist_unique_port()
snake_case__ = f"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
snake_case__ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__UpperCamelCase , env=self.get_env() )
else:
snake_case__ = ["""run_translation.py"""] + args
with patch.object(__UpperCamelCase , """argv""" , __UpperCamelCase ):
main()
return output_dir | 566 | 1 |
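A quick sanity check of the memory arithmetic in the comments of the test above. This is a standalone sketch, not part of the test suite; the parameter counts come from the comments, and the per-parameter optimizer-state sizes (8 bytes for AdamW's two fp32 moments, 2 bytes for bnb's 8-bit states) are the stated assumptions.
# Sketch: reproduce the expected ~150MB optimizer-state saving described above.
total_params_m = 54        # sshleifer/student_marian_en_ro_6_1, per the comment
embedding_params_m = 29    # nn.Embedding weights that bnb leaves in fp32
quantized_params_m = total_params_m - embedding_params_m  # 25M actually quantized
adamw_optim_mb = quantized_params_m * 8  # 8 bytes/param -> ~200MB
bnb_optim_mb = quantized_params_m * 2    # 2 bytes/param -> ~50MB
# ~150MB expected saving; the test allows a 30MB margin and asserts >= 120MB.
assert adamw_optim_mb - bnb_optim_mb == 150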
'''simple docstring'''
import unittest
from transformers import DonutProcessor
a_ : Optional[int] = """naver-clova-ix/donut-base"""
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = DonutProcessor.from_pretrained(UpperCamelCase )
def snake_case ( self ):
"""simple docstring"""
lowerCamelCase_ = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
lowerCamelCase_ = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
lowerCamelCase_ = self.processor.tokenajson(UpperCamelCase )
self.assertDictEqual(UpperCamelCase , UpperCamelCase )
| 675 |
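For reference, the `tokenajson` call above corresponds to DonutProcessor's token-to-JSON conversion, which parses a flat `<s_key>...</s_key>` token sequence back into a nested structure. Below is a minimal, hypothetical re-implementation of that idea, sufficient for the fixture in the test above; the real method additionally handles special tokens, class tokens, and malformed sequences.
import re

def simple_token2json(tokens: str) -> dict:
    """Parse Donut-style '<s_key>value</s_key>' sequences into a dict (simplified sketch)."""
    output = {}
    while tokens:
        start = re.search(r"<s_(.+?)>", tokens)
        if start is None:
            break
        key = start.group(1)
        end = re.search(rf"</s_{re.escape(key)}>", tokens)
        if end is None:
            break
        content = tokens[start.end() : end.start()]
        if "<s_" in content:
            # Nested group: recurse over <sep/>-separated repeated items.
            items = [simple_token2json(part) for part in content.split("<sep/>")]
            output[key] = items if len(items) > 1 else items[0]
        else:
            output[key] = content.strip()
        tokens = tokens[end.end() :]
    return output
Run on the token sequence from the test, this yields the expected dict, including the two-element "nicknames" list.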
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class snake_case ( pl.LightningModule ):
"""simple docstring"""
def __init__( self , UpperCamelCase ):
"""simple docstring"""
super().__init__()
lowerCamelCase_ = model
lowerCamelCase_ = 2
lowerCamelCase_ = nn.Linear(self.model.config.hidden_size , self.num_labels )
def snake_case ( self ):
"""simple docstring"""
pass
def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
# load longformer model from model identifier
lowerCamelCase_ = LongformerModel.from_pretrained(UpperCAmelCase_ )
lowerCamelCase_ = LightningModel(UpperCAmelCase_ )
lowerCamelCase_ = torch.load(UpperCAmelCase_ , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
lowerCamelCase_ = LongformerForQuestionAnswering.from_pretrained(UpperCAmelCase_ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCAmelCase_ )
print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a_ : Tuple = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 675 | 1 |
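Once converted, the checkpoint is used like any other extractive-QA model. A minimal usage sketch, assuming the script above was run with a hypothetical output folder ./longformer-qa; the start/end-logit decoding is the standard pattern for *ForQuestionAnswering heads.
import torch
from transformers import LongformerForQuestionAnswering, LongformerTokenizer

model = LongformerForQuestionAnswering.from_pretrained("./longformer-qa")  # hypothetical path
tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")

question = "What was converted?"
context = "A PyTorch Lightning checkpoint was converted to a Longformer QA model."
inputs = tokenizer(question, context, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Decode the most likely answer span from the start/end logits.
start = int(torch.argmax(outputs.start_logits))
end = int(torch.argmax(outputs.end_logits)) + 1
answer = tokenizer.decode(inputs["input_ids"][0][start:end])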
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _UpperCamelCase( _snake_case ):
__SCREAMING_SNAKE_CASE : Optional[int] = '''mgp-str'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str=[3_2, 1_2_8] , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : List[str]=2_7 , SCREAMING_SNAKE_CASE__ : int=3_8 , SCREAMING_SNAKE_CASE__ : List[Any]=5_0_2_5_7 , SCREAMING_SNAKE_CASE__ : Any=3_0_5_2_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=4.0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : Tuple=1e-5 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , **SCREAMING_SNAKE_CASE__ : str , ):
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
__a : List[Any] = image_size
__a : str = patch_size
__a : Union[str, Any] = num_channels
__a : Tuple = max_token_length
__a : int = num_character_labels
__a : Union[str, Any] = num_bpe_labels
__a : Optional[Any] = num_wordpiece_labels
__a : Any = hidden_size
__a : Optional[int] = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : int = mlp_ratio
__a : Any = distilled
__a : List[str] = layer_norm_eps
__a : Dict = drop_rate
__a : Union[str, Any] = qkv_bias
__a : List[Any] = attn_drop_rate
__a : Union[str, Any] = drop_path_rate
__a : Union[str, Any] = output_aa_attentions
__a : List[Any] = initializer_range
| 701 |
from __future__ import annotations
def UpperCAmelCase__ ( lowerCamelCase_ : list[int] ):
if not nums:
return 0
__a : Any = nums[0]
__a : List[Any] = 0
for num in nums[1:]:
__a , __a : List[Any] = (
max_excluding + num,
max(lowerCamelCase_ , lowerCamelCase_ ),
)
return max(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 577 | 0 |
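The function above is the classic maximum-non-adjacent-sum ("house robber") recurrence with mangled identifiers. The same algorithm with readable names, as a sketch for reference:
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Largest sum of elements where no two chosen elements are adjacent.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,                # take num: the previous element must be skipped
            max(max_including, max_excluding),  # skip num: carry the better of both states
        )
    return max(max_including, max_excluding)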
__UpperCAmelCase = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def UpperCamelCase ( snake_case__ : float ) -> str:
assert type(snake_case__ ) in (int, float) and decimal == int(snake_case__ )
UpperCamelCase : Any = int(snake_case__ )
UpperCamelCase : Any = ''
UpperCamelCase : List[str] = False
if decimal < 0:
UpperCamelCase : Optional[Any] = True
decimal *= -1
while decimal > 0:
UpperCamelCase , UpperCamelCase : Union[str, Any] = divmod(snake_case__ , 16 )
UpperCamelCase : int = values[remainder] + hexadecimal
UpperCamelCase : int = '0x' + hexadecimal
if negative:
UpperCamelCase : str = '-' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 |
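The same conversion with the original names restored, plus example calls; behaviour matches the snippet above ('0x' prefix, lowercase digits, leading '-' for negatives). Note that, as written, an input of 0 skips the loop and yields just "0x".
values = "0123456789abcdef"

def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert an integral number to a '0x'-prefixed lowercase hex string.

    >>> decimal_to_hexadecimal(5)
    '0x5'
    >>> decimal_to_hexadecimal(-256)
    '-0x100'
    """
    assert isinstance(decimal, (int, float)) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = decimal < 0
    if negative:
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    return "-" + hexadecimal if negative else hexadecimal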
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase = random.Random()
def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : str=1.0 , snake_case__ : int=None , snake_case__ : Union[str, Any]=None ) -> Any:
if rng is None:
UpperCamelCase : int = global_rng
UpperCamelCase : Union[str, Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=2000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0.0, SCREAMING_SNAKE_CASE_=1_6000, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCamelCase : Dict = parent
UpperCamelCase : Dict = batch_size
UpperCamelCase : Any = min_seq_length
UpperCamelCase : Optional[int] = max_seq_length
UpperCamelCase : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase : Tuple = feature_size
UpperCamelCase : Any = padding_value
UpperCamelCase : Tuple = sampling_rate
UpperCamelCase : Optional[Any] = return_attention_mask
UpperCamelCase : Optional[Any] = do_normalize
def snake_case_ ( self ) -> Union[str, Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def snake_case_ ( self, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False ) -> Union[str, Any]:
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase : str = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
class lowerCAmelCase_ ( a__ , unittest.TestCase ):
UpperCAmelCase__ : Any = WavaVecaFeatureExtractor
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Tuple = WavaVecaFeatureExtractionTester(self )
def snake_case_ ( self, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE_, axis=0 ) - 1 ) < 1e-3 ) )
def snake_case_ ( self ) -> Optional[int]:
# Tests that all calls wrap to encode_plus and batch_encode_plus
UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase : Any = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase : List[Any] = feat_extract(speech_inputs[0], return_tensors='np' ).input_values
UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0], return_tensors='np' ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test batched
UpperCamelCase : List[Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : int = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase : Optional[int] = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
UpperCamelCase : Dict = feat_extract(SCREAMING_SNAKE_CASE_, return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, atol=1e-3 ) )
def snake_case_ ( self ) -> int:
UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : Any = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Optional[Any] = feat_extract(SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Tuple = range(800, 1400, 200 )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in lengths]
UpperCamelCase : int = ['longest', 'max_length', 'do_not_pad']
UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : Tuple = feat_extract(SCREAMING_SNAKE_CASE_, max_length=SCREAMING_SNAKE_CASE_, padding=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def snake_case_ ( self ) -> Optional[Any]:
UpperCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : int = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='max_length', return_tensors='np' )
UpperCamelCase : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def snake_case_ ( self ) -> List[Any]:
UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=1000, padding='longest', return_tensors='np' )
UpperCamelCase : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
UpperCamelCase : str = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase : Any = feat_extract(
SCREAMING_SNAKE_CASE_, truncation=SCREAMING_SNAKE_CASE_, max_length=2000, padding='longest', return_tensors='np' )
UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def snake_case_ ( self ) -> str:
import torch
UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase : Dict = np.random.rand(100 ).astype(np.floataa )
UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase : Union[str, Any] = feature_extractor.pad([{'input_values': inputs}], return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase : Any = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def snake_case_ ( self ) -> Tuple:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
UpperCamelCase : int = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == 'layer' )
| 40 | 1 |
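The `_check_zero_mean_unit_variance` assertions above test the extractor's `do_normalize` path, which is plain per-utterance standardization. A minimal numpy sketch of the property being checked (the real extractor also restricts the statistics to non-padded samples via the attention mask):
import numpy as np

def zero_mean_unit_var(speech: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    # Standardize one utterance; eps guards against division by zero on silence.
    return (speech - speech.mean()) / np.sqrt(speech.var() + eps)

raw = np.random.rand(800).astype(np.float32)
norm = zero_mean_unit_var(raw)
assert abs(norm.mean()) < 1e-3
assert abs(norm.var() - 1) < 1e-3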
'''simple docstring'''
from __future__ import annotations
UpperCamelCase__: int = []
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> bool:
for i in range(len(_lowerCAmelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(_lowerCAmelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(_lowerCAmelCase , -1 , -1 ) , range(_lowerCAmelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(_lowerCAmelCase , -1 , -1 ) , range(_lowerCAmelCase , len(_lowerCAmelCase ) ) ):
if board[i][j] == 1:
return False
return True
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] ) -> bool:
if row >= len(_lowerCAmelCase ):
solution.append(_lowerCAmelCase )
printboard(_lowerCAmelCase )
print()
return True
for i in range(len(_lowerCAmelCase ) ):
if is_safe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase : Dict = 1
solve(_lowerCAmelCase , row + 1 )
UpperCAmelCase : List[Any] = 0
return False
def snake_case_ ( _lowerCAmelCase : List[Any] ) -> None:
for i in range(len(_lowerCAmelCase ) ):
for j in range(len(_lowerCAmelCase ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
# n=int(input("The no. of queens"))
UpperCamelCase__: List[Any] = 8
UpperCamelCase__: Optional[int] = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 711 |
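As a cross-check on the backtracking solver above: for n = 8 the puzzle famously has 92 solutions. A compact, independent brute-force count over column permutations (rows and columns are unique by construction, so only the two diagonals need checking):
from itertools import permutations

def count_n_queens(n: int) -> int:
    return sum(
        len({row + col for row, col in enumerate(perm)}) == n      # "/" diagonals all distinct
        and len({row - col for row, col in enumerate(perm)}) == n  # "\" diagonals all distinct
        for perm in permutations(range(n))
    )

assert count_n_queens(8) == 92  # classic 8-queens solution count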
'''simple docstring'''
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple ) -> str:
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ) -> List[str]:
UpperCAmelCase : int = tmp_path / '''cache'''
UpperCAmelCase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase : Any = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read()
_check_sql_dataset(_lowerCAmelCase , _lowerCAmelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str ) -> Optional[int]:
UpperCAmelCase : Union[str, Any] = tmp_path / '''cache'''
UpperCAmelCase : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
UpperCAmelCase : str = features.copy() if features else default_expected_features
UpperCAmelCase : Optional[Any] = (
Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase : Tuple = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read()
_check_sql_dataset(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : List[Any] ) -> Dict:
with contextlib.closing(sqlitea.connect(_lowerCAmelCase ) ) as con:
UpperCAmelCase : Any = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Optional[int]:
UpperCAmelCase : Optional[int] = tmp_path / '''cache'''
UpperCAmelCase : int = os.path.join(_lowerCAmelCase , '''tmp.sql''' )
UpperCAmelCase : Any = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read()
SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
UpperCAmelCase : List[Any] = iter_sql_file(_lowerCAmelCase )
UpperCAmelCase : List[Any] = iter_sql_file(_lowerCAmelCase )
for rowa, rowa in zip(_lowerCAmelCase , _lowerCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] ) -> int:
UpperCAmelCase : Optional[Any] = tmp_path / '''cache'''
UpperCAmelCase : Any = os.path.join(_lowerCAmelCase , '''tmp.sql''' )
UpperCAmelCase : List[str] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read()
SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
UpperCAmelCase : List[str] = iter_sql_file(_lowerCAmelCase )
UpperCAmelCase : Any = iter_sql_file(_lowerCAmelCase )
for rowa, rowa in zip(_lowerCAmelCase , _lowerCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ) -> List[Any]:
UpperCAmelCase : Union[str, Any] = tmp_path / '''cache'''
UpperCAmelCase : Tuple = os.path.join(_lowerCAmelCase , '''tmp.sql''' )
UpperCAmelCase : Optional[int] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_lowerCAmelCase ).read()
with pytest.raises(_lowerCAmelCase ):
SqlDatasetWriter(_lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
| 528 | 0 |
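Outside the test harness, the same reader/writer pair is exposed through the public API as Dataset.from_sql and Dataset.to_sql (available in recent datasets releases). A round-trip sketch of what the tests exercise; the database path and table name below are placeholders:
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})

# Write into a SQLite table, then read it back through SQLAlchemy.
ds.to_sql("dataset", "sqlite:///example.db")
round_tripped = Dataset.from_sql("dataset", "sqlite:///example.db")
assert round_tripped.num_rows == ds.num_rows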
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowercase_ :
def __init__( self : Tuple , _lowercase : List[str] , _lowercase : Any=1_3 , _lowercase : Optional[int]=2 , _lowercase : int=2_4 , _lowercase : Optional[int]=1_6 , _lowercase : Dict=True , _lowercase : List[Any]=True , _lowercase : Tuple=3_2 , _lowercase : List[Any]=5 , _lowercase : List[Any]=4 , _lowercase : Tuple=3_7 , _lowercase : Tuple="gelu" , _lowercase : Tuple=0.1 , _lowercase : List[str]=0.1 , _lowercase : Any=1_0 , _lowercase : List[Any]=0.02 , _lowercase : List[str]=None , _lowercase : Dict=2 , _lowercase : Union[str, Any]=2 , ):
lowerCAmelCase__ : Union[str, Any] = parent
lowerCAmelCase__ : Any = batch_size
lowerCAmelCase__ : Optional[Any] = patch_size
lowerCAmelCase__ : List[Any] = max_length
lowerCAmelCase__ : Optional[int] = num_mel_bins
lowerCAmelCase__ : Any = is_training
lowerCAmelCase__ : Union[str, Any] = use_labels
lowerCAmelCase__ : Any = hidden_size
lowerCAmelCase__ : Optional[Any] = num_hidden_layers
lowerCAmelCase__ : str = num_attention_heads
lowerCAmelCase__ : Any = intermediate_size
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : str = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = type_sequence_label_size
lowerCAmelCase__ : Any = initializer_range
lowerCAmelCase__ : Union[str, Any] = scope
lowerCAmelCase__ : Union[str, Any] = frequency_stride
lowerCAmelCase__ : Union[str, Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase__ : Optional[int] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
lowerCAmelCase__ : Any = (self.max_length - self.patch_size) // self.time_stride + 1
lowerCAmelCase__ : str = frequency_out_dimension * time_out_dimension
lowerCAmelCase__ : Optional[int] = num_patches + 2
def _lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
lowerCAmelCase__ : int = None
if self.use_labels:
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Tuple = self.get_config()
return config, input_values, labels
def _lowerCAmelCase ( self : Optional[int] ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _lowerCAmelCase ( self : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : Tuple , _lowercase : Optional[Any] ):
lowerCAmelCase__ : List[Any] = ASTModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowerCAmelCase__ : Dict = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Dict = config_and_inputs
lowerCAmelCase__ : Dict = {"input_values": input_values}
return config, inputs_dict
@require_torch
class lowercase_ ( a_ , a_ , unittest.TestCase ):
__magic_name__ : Dict = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__magic_name__ : Any = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
__magic_name__ : Dict = False
__magic_name__ : Dict = False
__magic_name__ : Tuple = False
__magic_name__ : List[Any] = False
def _lowerCAmelCase ( self : Optional[int] , _lowercase : Dict , _lowercase : List[str] , _lowercase : List[Any] , _lowercase : str , _lowercase : Any ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _lowerCAmelCase ( self : int ):
lowerCAmelCase__ : str = ASTModelTester(self )
lowerCAmelCase__ : Optional[int] = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=3_7 )
def _lowerCAmelCase ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def _lowerCAmelCase ( self : Tuple ):
pass
def _lowerCAmelCase ( self : Any ):
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def _lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Tuple = model_class(_lowercase )
lowerCAmelCase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Any = [*signature.parameters.keys()]
lowerCAmelCase__ : Optional[Any] = ["input_values"]
self.assertListEqual(arg_names[:1] , _lowercase )
def _lowerCAmelCase ( self : str ):
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@slow
def _lowerCAmelCase ( self : Optional[Any] ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : List[Any] = ASTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowercase__ ( ) -> Optional[Any]:
lowerCAmelCase__ : Optional[int] = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = torchaudio.load(lowerCamelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class lowercase_ ( unittest.TestCase ):
@cached_property
def _lowerCAmelCase ( self : List[Any] ):
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def _lowerCAmelCase ( self : List[Any] ):
lowerCAmelCase__ : List[Any] = self.default_feature_extractor
lowerCAmelCase__ : Union[str, Any] = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(_lowercase )
lowerCAmelCase__ : Optional[int] = self.default_feature_extractor
lowerCAmelCase__ , lowerCAmelCase__ : str = prepare_audio()
lowerCAmelCase__ : List[str] = audio.squeeze().numpy()
lowerCAmelCase__ : Union[str, Any] = feature_extractor(_lowercase , sampling_rate=_lowercase , return_tensors="pt" ).to(_lowercase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**_lowercase )
# verify the logits
lowerCAmelCase__ : int = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , _lowercase )
lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 308 |
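The slow test above is the standard AST inference recipe. Stripped of the test scaffolding it looks roughly as follows; the random waveform is a stand-in for real 16kHz audio, so the predicted label is meaningless, but the shapes match what the test asserts.
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

checkpoint = "MIT/ast-finetuned-audioset-10-10-0.4593"
extractor = ASTFeatureExtractor.from_pretrained(checkpoint)
model = ASTForAudioClassification.from_pretrained(checkpoint)

waveform = torch.randn(16_000).numpy()  # one second of fake audio at 16kHz
inputs = extractor(waveform, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 527): the AudioSet label space
predicted = model.config.id2label[int(logits.argmax(-1))]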
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__ ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Any ) -> Any:
# Initialise PyTorch model
lowerCAmelCase__ : Any = MobileBertConfig.from_json_file(lowerCamelCase )
print(F"Building PyTorch model from configuration: {config}" )
lowerCAmelCase__ : str = MobileBertForPreTraining(lowerCamelCase )
# Load weights from tf checkpoint
lowerCAmelCase__ : Union[str, Any] = load_tf_weights_in_mobilebert(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , lowerCamelCase )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 308 | 1 |
"""simple docstring"""
def lowerCAmelCase_ ( UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 22 ):
"""simple docstring"""
__lowercase = range(1 , UpperCamelCase__ )
__lowercase = range(1 , UpperCamelCase__ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""")
| 442 |
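This is Project Euler problem 63: counting n-digit positive integers that are also nth powers. Bases of 10 or more can never qualify (10**n already has n+1 digits), which is why 9 is the largest base worth checking, and powers above 21 drop out because even 9**22 falls short of 22 digits. A reimplementation with readable names plus spot checks:
def count_powerful_digit_counts(max_base: int = 10, max_power: int = 22) -> int:
    return sum(
        1
        for power in range(1, max_power)
        for base in range(1, max_base)
        if len(str(base**power)) == power
    )

assert len(str(9**5)) == 5                  # 59049: a 5-digit fifth power
assert len(str(2**10)) != 10                # 1024: only 4 digits, so it doesn't count
assert count_powerful_digit_counts() == 49  # the known Project Euler #63 answer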
"""simple docstring"""
import os
import numpy
import onnx
def lowerCAmelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__lowercase = a.name
__lowercase = b.name
__lowercase = """"""
__lowercase = """"""
__lowercase = a == b
__lowercase = name_a
__lowercase = name_b
return res
def lowerCAmelCase_ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict ):
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ):
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Any ):
"""simple docstring"""
__lowercase = list(model.graph.initializer )
__lowercase = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__lowercase = inits[i].name
__lowercase = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase_ ( UpperCamelCase__ : List[Any] ):
"""simple docstring"""
__lowercase = os.path.dirname(UpperCamelCase__ )
__lowercase = os.path.basename(UpperCamelCase__ )
__lowercase = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
__lowercase = list(model.graph.initializer )
__lowercase = set()
__lowercase = {}
__lowercase = []
__lowercase = 0
for i in range(len(UpperCamelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCamelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCamelCase__ )
dup_set.add(UpperCamelCase__ )
__lowercase = inits[j].data_type
__lowercase = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , UpperCamelCase__ )
total_reduced_size += mem_size
__lowercase = inits[i].name
__lowercase = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCamelCase__ )
else:
__lowercase = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
__lowercase = sorted(UpperCamelCase__ )
_remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__lowercase = """optimized_""" + model_file_name
__lowercase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
onnx.save(UpperCamelCase__ , UpperCamelCase__ )
return new_model
| 442 | 1 |
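The magic numbers in the dtype branches above are ONNX TensorProto element types: 1 = FLOAT and 6 = INT32 (4 bytes each), 7 = INT64 and 11 = DOUBLE (8 bytes each). A small sketch of the same size computation using the named enum values instead:
import numpy
import onnx

BYTES_PER_ELEMENT = {
    onnx.TensorProto.FLOAT: 4,   # data_type == 1
    onnx.TensorProto.INT32: 4,   # data_type == 6
    onnx.TensorProto.INT64: 8,   # data_type == 7
    onnx.TensorProto.DOUBLE: 8,  # data_type == 11
}

def initializer_size_bytes(init: onnx.TensorProto) -> int:
    # Element count times byte width; raises KeyError on types the script doesn't handle.
    elements = int(numpy.prod(init.dims))
    return elements * BYTES_PER_ELEMENT[init.data_type]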
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a__: Optional[Any] = logging.get_logger(__name__)
a__: Any = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
a__: Optional[Any] = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def UpperCamelCase__( UpperCamelCase__ : List[Any] )->Any:
A__ = torch.load(UpperCamelCase__ , map_location='''cpu''' )
return sd
def UpperCamelCase__( UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=rename_keys_prefix )->str:
A__ = OrderedDict()
A__ = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
A__ = key
for name_pair in rename_keys_prefix:
A__ = new_key.replace(name_pair[0] , name_pair[1] )
A__ = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
A__ = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def UpperCamelCase__( UpperCamelCase__ : Tuple , UpperCamelCase__ : int )->str:
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
A__ = '''pretraining'''
if "vcr" in checkpoint_path:
A__ = {'''visual_embedding_dim''': 5_12}
elif "vqa_advanced" in checkpoint_path:
A__ = {'''visual_embedding_dim''': 20_48}
elif "vqa" in checkpoint_path:
A__ = {'''visual_embedding_dim''': 20_48}
elif "nlvr" in checkpoint_path:
A__ = {'''visual_embedding_dim''': 10_24}
else:
raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
else:
if "vcr" in checkpoint_path:
A__ = {'''visual_embedding_dim''': 5_12}
A__ = '''multichoice'''
elif "vqa_advanced" in checkpoint_path:
A__ = {'''visual_embedding_dim''': 20_48}
A__ = '''vqa_advanced'''
elif "vqa" in checkpoint_path:
A__ = {'''visual_embedding_dim''': 20_48, '''num_labels''': 31_29}
A__ = '''vqa'''
elif "nlvr" in checkpoint_path:
A__ = {
'''visual_embedding_dim''': 10_24,
'''num_labels''': 2,
}
A__ = '''nlvr'''
A__ = VisualBertConfig(**UpperCamelCase__ )
# Load State Dict
A__ = load_state_dict(UpperCamelCase__ )
A__ = get_new_dict(UpperCamelCase__ , UpperCamelCase__ )
if model_type == "pretraining":
A__ = VisualBertForPreTraining(UpperCamelCase__ )
elif model_type == "vqa":
A__ = VisualBertForQuestionAnswering(UpperCamelCase__ )
elif model_type == "nlvr":
A__ = VisualBertForVisualReasoning(UpperCamelCase__ )
elif model_type == "multichoice":
A__ = VisualBertForMultipleChoice(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# Save Checkpoints
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
a__: int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
a__: Tuple = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 190 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = CTRLTokenizer
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
A__ = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
A__ = dict(zip(__lowerCamelCase,range(len(__lowerCamelCase ) ) ) )
A__ = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
A__ = {'''unk_token''': '''<unk>'''}
A__ = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['''vocab_file'''] )
A__ = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file,'''w''',encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + '''\n''' )
with open(self.merges_file,'''w''',encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCamelCase ) )
def UpperCamelCase ( self,**__lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname,**__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase ):
A__ = '''adapt react readapt apt'''
A__ = '''adapt react readapt apt'''
return input_text, output_text
def UpperCamelCase ( self ):
A__ = CTRLTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
A__ = '''adapt react readapt apt'''
A__ = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
A__ = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
A__ = tokens + [tokenizer.unk_token]
A__ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ),__lowerCamelCase )
| 190 | 1 |
def __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0 , __SCREAMING_SNAKE_CASE = 0 ) -> int:
"""simple docstring"""
__a = right or len(a_ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(a_ , a_ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
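The function above is a recursive linear search that probes both ends of the list and narrows inward, so the input does not need to be sorted and the recursion depth is at most half the list length. The same routine with readable names and example calls (note the `right or len(...) - 1` idiom means an explicit right=0 is treated as "search the whole list"):
def search(list_data: list, key, left: int = 0, right: int = 0) -> int:
    """Return the index of key, or -1 if it is absent.

    >>> search([0, 5, 7, 10, 15], 5)
    1
    >>> search([4, 2, 9], 3)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    if list_data[left] == key:
        return left
    if list_data[right] == key:
        return right
    return search(list_data, key, left + 1, right - 1)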
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self : int ):
'''simple docstring'''
__a = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
__a = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
__a = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
__a = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__a = model(SCREAMING_SNAKE_CASE__ )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
@slow
def __a ( self : int ):
'''simple docstring'''
__a = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
__a = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
__a = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
__a = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__a = model(SCREAMING_SNAKE_CASE__ )["""last_hidden_state"""].detach()
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
| 201 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
UpperCAmelCase__ : Optional[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase__ : int = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'facebook/nllb-200-distilled-600M'
snake_case__ :Optional[Any] = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case__ :List[Any] = 'translator'
snake_case__ :List[Any] = AutoTokenizer
snake_case__ :Optional[Any] = AutoModelForSeqaSeqLM
snake_case__ :List[str] = LANGUAGE_CODES
snake_case__ :List[Any] = ['text', 'text', 'text']
snake_case__ :List[Any] = ['text']
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
lowerCAmelCase__ = self.lang_to_code[src_lang]
lowerCAmelCase__ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__magic_name__ , return_tensors="pt" , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__magic_name__ )
| 48 | 1 |
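The tool above packages the standard NLLB-200 generation recipe. Used directly, translation with that checkpoint looks roughly like this sketch; src_lang and the target code use the FLORES-200 codes from the table, and forced_bos_token_id steers the decoder into the target language. (Exact tokenizer helpers vary slightly across transformers versions.)
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

checkpoint = "facebook/nllb-200-distilled-600M"
tokenizer = AutoTokenizer.from_pretrained(checkpoint, src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

inputs = tokenizer("The weather is nice today.", return_tensors="pt")
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("ron_Latn"),  # target: Romanian
    max_length=50,
)
print(tokenizer.decode(generated[0], skip_special_tokens=True))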
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class a ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self ):
lowercase = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
lowercase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowercase = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim
lowercase = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowercase = model(_lowerCamelCase )['last_hidden_state'].detach()
self.assertEqual(output.shape , _lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _lowerCamelCase , atol=1e-3 ) )
@slow
def UpperCamelCase_ ( self ):
lowercase = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
lowercase = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
# The dog is cute and lives in the garden house
lowercase = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim
lowercase = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowercase = model(_lowerCamelCase )['last_hidden_state'].detach()
self.assertEqual(output.shape , _lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _lowerCamelCase , atol=1e-3 ) )
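# These checks download full checkpoints, so they only run when the slow-test gate
# is lifted; an illustrative invocation (the test path is an assumption):
#
#     RUN_SLOW=1 pytest tests/models/xlm_roberta/test_modeling_xlm_roberta.py -k integration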
| 702 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_UpperCamelCase : str = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[Any] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : str = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
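# Illustrative consequence of the lazy module (not part of the original file):
# `import transformers.models.gpt_neox` stays cheap because `sys.modules[__name__]`
# is swapped for a `_LazyModule`; the heavy torch-backed submodule is only imported
# on first attribute access, e.g.
#
#     from transformers.models.gpt_neox import GPTNeoXForCausalLM  # triggers the real import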
| 134 | 0 |
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
 | 201 |
def harmonic_series(n_term: str) -> list:
    """Returns the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
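# Illustrative expected behavior (hypothetical session, not part of the file):
#
#     >>> harmonic_series("4")
#     ['1', '1/2', '1/3', '1/4']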
| 521 | 0 |
def check_cycle(graph: dict) -> bool:
    """Returns True if the directed graph (adjacency-list dict) contains a cycle."""
    # Keep track of all the visited nodes
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # A back edge to a vertex already on the stack means a cycle
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
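# Usage sketch (the graphs below are made up for illustration):
#
#     >>> check_cycle({0: [1], 1: [2], 2: [0]})  # 0 -> 1 -> 2 -> 0 is a back edge
#     True
#     >>> check_cycle({0: [1], 1: [2], 2: []})
#     False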
| 444 |
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node: Node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            # Retarget each search toward the frontier of the other one
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()

    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print('Unidirectional BFS computation time : ', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print('Bidirectional BFS computation time : ', bd_bfs_time)
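# Quick sanity check (hypothetical session; relies on the module-level `grid`):
# paths come back as (y, x) tuples running from start to goal.
#
#     >>> bfs = BreadthFirstSearch((0, 0), (6, 6))
#     >>> path = bfs.search()
#     >>> path[0], path[-1]
#     ((0, 0), (6, 6))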
| 444 | 1 |