code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
def UpperCamelCase (lowercase_: Optional[int] , lowercase_: Optional[int] , lowercase_: Optional[Any]=False ) -> Union[str, Any]:
if isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ):
A__ : Union[str, Any] = len(set_a.intersection(lowercase_ ) )
if alternative_union:
A__ : Union[str, Any] = len(lowercase_ ) + len(lowercase_ )
else:
A__ : Optional[int] = len(set_a.union(lowercase_ ) )
return intersection / union
if isinstance(lowercase_ , (list, tuple) ) and isinstance(lowercase_ , (list, tuple) ):
A__ : Any = [element for element in set_a if element in set_b]
if alternative_union:
A__ : Union[str, Any] = len(lowercase_ ) + len(lowercase_ )
return len(lowercase_ ) / union
else:
A__ : Optional[int] = set_a + [element for element in set_b if element not in set_a]
return len(lowercase_ ) / len(lowercase_ )
return len(lowercase_ ) / len(lowercase_ )
return None
if __name__ == "__main__":
A_ : Optional[Any] = {'a', 'b', 'c', 'd', 'e'}
A_ : List[Any] = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
| 64 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Optional[int] = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase (lowercase_: List[str] ) -> Any:
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def UpperCamelCase (lowercase_: Optional[int] ) -> Optional[Any]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def UpperCamelCase (lowercase_: List[str] ) -> Optional[Any]:
from transformers.testing_utils import pytest_terminal_summary_main
A__ : List[Any] = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: int ) -> List[str]:
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
A__ : Tuple = 0
# Doctest custom flag to ignore output.
A_ : Tuple = doctest.register_optionflag('IGNORE_RESULT')
A_ : Dict = doctest.OutputChecker
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self , A__ , A__ , A__ ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , A__ , A__ , A__ )
A_ : str = CustomOutputChecker
A_ : Dict = HfDoctestModule
A_ : Optional[int] = HfDocTestParser
| 64 | 1 |
from __future__ import annotations
class _a :
'''simple docstring'''
def __init__( self , A__ ):
A__ : List[Any] = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(A__ ) != 0:
A__ : Optional[Any] = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(A__ ) != cols:
raise error
for value in row:
if not isinstance(A__ , (int, float) ):
raise error
A__ : List[Any] = rows
else:
A__ : Union[str, Any] = []
def __A ( self ):
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def __A ( self ):
return len(self.rows )
@property
def __A ( self ):
return len(self.rows[0] )
@property
def __A ( self ):
return (self.num_rows, self.num_columns)
@property
def __A ( self ):
return self.order[0] == self.order[1]
def __A ( self ):
A__ : Any = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(A__ )
def __A ( self ):
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def __A ( self ):
return bool(self.determinant() )
def __A ( self , A__ , A__ ):
A__ : str = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(A__ ).determinant()
def __A ( self , A__ , A__ ):
if (row + column) % 2 == 0:
return self.get_minor(A__ , A__ )
return -1 * self.get_minor(A__ , A__ )
def __A ( self ):
return Matrix(
[
[self.get_minor(A__ , A__ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def __A ( self ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def __A ( self ):
A__ : Tuple = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(A__ )
def __A ( self ):
A__ : Optional[int] = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self ):
return str(self.rows )
def __str__( self ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(A__ ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def __A ( self , A__ , A__ = None ):
A__ : List[Any] = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(A__ , A__ ):
raise type_error
for value in row:
if not isinstance(A__ , (int, float) ):
raise type_error
if len(A__ ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(A__ )
else:
A__ : Optional[int] = self.rows[0:position] + [row] + self.rows[position:]
def __A ( self , A__ , A__ = None ):
A__ : Any = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(A__ , A__ ):
raise type_error
for value in column:
if not isinstance(A__ , (int, float) ):
raise type_error
if len(A__ ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
A__ : int = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
A__ : Dict = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , A__ ):
if not isinstance(A__ , A__ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , A__ ):
return not self == other
def __neg__( self ):
return self * -1
def __add__( self , A__ ):
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , A__ ):
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , A__ ):
if isinstance(A__ , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(A__ , A__ ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(A__ , A__ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self , A__ ):
if not isinstance(A__ , A__ ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
A__ : Union[str, Any] = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def __A ( cls , A__ , A__ ):
return sum(row[i] * column[i] for i in range(len(A__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
'''simple docstring'''
UpperCAmelCase__: List[Any] = PegasusConfig
UpperCAmelCase__: Optional[int] = {}
UpperCAmelCase__: List[str] = '''gelu'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
A__ : Dict = parent
A__ : Dict = batch_size
A__ : Any = seq_length
A__ : Optional[Any] = is_training
A__ : int = use_labels
A__ : Any = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : List[Any] = max_position_embeddings
A__ : Any = eos_token_id
A__ : List[Any] = pad_token_id
A__ : List[Any] = bos_token_id
def __A ( self ):
A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def __A ( self , A__ , A__ ):
A__ : int = TFPegasusModel(config=A__ ).get_decoder()
A__ : List[Any] = inputs_dict["""input_ids"""]
A__ : Any = input_ids[:1, :]
A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
A__ : Optional[int] = inputs_dict["""head_mask"""]
A__ : Any = 1
# first forward pass
A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
A__ , A__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
A__ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict , lowercase_: List[Any] , lowercase_: Dict=None , lowercase_: int=None , lowercase_: List[Any]=None , lowercase_: List[Any]=None , lowercase_: str=None , ) -> int:
if attention_mask is None:
A__ : List[str] = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__: int = True
UpperCAmelCase__: Union[str, Any] = False
UpperCAmelCase__: List[str] = False
def __A ( self ):
A__ : Optional[Any] = TFPegasusModelTester(self )
A__ : Tuple = ConfigTester(self , config_class=A__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCAmelCase__: Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''
@cached_property
def __A ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ):
A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __A ( self , **A__ ):
A__ : str = self.translate_src_text(**A__ )
assert self.expected_text == generated_words
def __A ( self , **A__ ):
A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
A__ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
return generated_words
@slow
def __A ( self ):
self._assert_generated_batch_equal_expected()
| 64 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
'''simple docstring'''
UpperCAmelCase__: List[Any] = PegasusConfig
UpperCAmelCase__: Optional[int] = {}
UpperCAmelCase__: List[str] = '''gelu'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
A__ : Dict = parent
A__ : Dict = batch_size
A__ : Any = seq_length
A__ : Optional[Any] = is_training
A__ : int = use_labels
A__ : Any = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : List[Any] = max_position_embeddings
A__ : Any = eos_token_id
A__ : List[Any] = pad_token_id
A__ : List[Any] = bos_token_id
def __A ( self ):
A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def __A ( self , A__ , A__ ):
A__ : int = TFPegasusModel(config=A__ ).get_decoder()
A__ : List[Any] = inputs_dict["""input_ids"""]
A__ : Any = input_ids[:1, :]
A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
A__ : Optional[int] = inputs_dict["""head_mask"""]
A__ : Any = 1
# first forward pass
A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
A__ , A__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
A__ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict , lowercase_: List[Any] , lowercase_: Dict=None , lowercase_: int=None , lowercase_: List[Any]=None , lowercase_: List[Any]=None , lowercase_: str=None , ) -> int:
if attention_mask is None:
A__ : List[str] = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__: int = True
UpperCAmelCase__: Union[str, Any] = False
UpperCAmelCase__: List[str] = False
def __A ( self ):
A__ : Optional[Any] = TFPegasusModelTester(self )
A__ : Tuple = ConfigTester(self , config_class=A__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCAmelCase__: Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''
@cached_property
def __A ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ):
A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __A ( self , **A__ ):
A__ : str = self.translate_src_text(**A__ )
assert self.expected_text == generated_words
def __A ( self , **A__ ):
A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
A__ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
return generated_words
@slow
def __A ( self ):
self._assert_generated_batch_equal_expected()
| 64 |
class _a :
'''simple docstring'''
def __init__( self ):
A__ : str = """"""
A__ : Any = """"""
A__ : List[Any] = []
def __A ( self , A__ , A__ ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
A__ : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
A__ : Union[str, Any] = self.__min_dist_top_down_dp(A__ , n - 1 )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , A__ )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
A__ : List[Any] = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
def __A ( self , A__ , A__ ):
A__ : Tuple = worda
A__ : Dict = worda
A__ : Optional[Any] = [[-1 for _ in range(len(A__ ) )] for _ in range(len(A__ ) )]
return self.__min_dist_top_down_dp(len(A__ ) - 1 , len(A__ ) - 1 )
def __A ( self , A__ , A__ ):
A__ : Optional[Any] = worda
A__ : Dict = worda
A__ : Union[str, Any] = len(A__ )
A__ : List[str] = len(A__ )
A__ : int = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
A__ : Tuple = j
elif j == 0: # second string is empty
A__ : Dict = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
A__ : str = self.dp[i - 1][j - 1]
else:
A__ : Union[str, Any] = self.dp[i][j - 1]
A__ : str = self.dp[i - 1][j]
A__ : Union[str, Any] = self.dp[i - 1][j - 1]
A__ : Tuple = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
if __name__ == "__main__":
A_ : Union[str, Any] = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
A_ : int = input('Enter the first string: ').strip()
A_ : List[str] = input('Enter the second string: ').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | 1 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
A_ : Any = logging.get_logger(__name__)
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , *A__ , **A__ ):
warnings.warn(
"""The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use LayoutLMv2ImageProcessor instead.""" , A__ , )
super().__init__(*A__ , **A__ )
| 64 |
def UpperCamelCase (lowercase_: int , lowercase_: int ) -> int:
while second != 0:
A__ : int = first & second
first ^= second
A__ : int = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Optional[Any] = int(input('Enter the first number: ').strip())
A_ : List[str] = int(input('Enter the second number: ').strip())
print(f'''{add(first, second) = }''')
| 64 | 1 |
def UpperCamelCase (lowercase_: float , lowercase_: float ) -> float:
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(lowercase_ ) * abs(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 64 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix @ x = vector by Gaussian elimination with partial pivoting.

    Fixes: the obfuscated version had duplicate `lowercase_` parameters
    (SyntaxError), never wrote into the augmented matrix, destroyed the pivot
    row swap (both swap targets were assigned to a dead local), and dropped
    the elimination ratios into dead locals.

    Args:
        matrix: square coefficient matrix.
        vector: right-hand side as a column matrix (size x 1).

    Returns:
        The solution x as a column matrix, entries rounded to 10 decimals.
    """
    size: int = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: pick the remaining row with the largest |entry|
        # in this column to reduce numerical error.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # Eliminate this column from all rows below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # Back substitution: clear entries above each pivot.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def UpperCamelCase(data_points: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree through (1, y1), (2, y2), ...

    Builds the Vandermonde system for the given points, solves it, and
    returns a callable evaluating the fitted polynomial at integer inputs.

    Fixes: the obfuscated version assigned the Vandermonde entries and the
    right-hand side to dead locals (so the system was never populated) and the
    inner function's parameter was named `lowercase_` while its body read `var`.
    """
    size: int = len(data_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            # Row for x = x_val + 1: powers (size-1) down to 0.
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        """Evaluate the fitted polynomial at var (coefficients rounded to ints)."""
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def UpperCamelCase(variable: int) -> int:
    """Generating function u(n) = 1 - n + n^2 - ... + n^10 (Project Euler 101).

    Fix: the parameter was named `lowercase_` while the body read `variable`
    (NameError); the parameter now matches the body.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def UpperCamelCase(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimal polynomials (PE 101).

    For each prefix of the sampled points, fit a polynomial and add its value
    at the first argument where it disagrees with func.

    Fixes: the obfuscated signature reused `lowercase_` for both parameters
    (SyntaxError), and the loop state was written to dead locals.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        x_val = 1
        # Advance to the first point where the fitted polynomial diverges.
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 64 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
A_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class _a (__magic_name__ ):
    """
    Image-to-text pipeline: converts an image (optionally conditioned on a text
    prompt) into generated text via a vision-to-seq model.

    NOTE(review): this file has been mechanically renamed. The base class is the
    unresolved placeholder `__magic_name__`; several methods all carry the name
    `__A` (in a class body each definition shadows the previous one); and many
    locals are written to the placeholder `A__` while later statements read
    descriptive names (`prompt`, `forward_kwargs`, `input_ids`, `model_inputs`,
    ...). Code is preserved byte-for-byte; reconcile names with upstream before
    relying on runtime behavior.
    """

    def __init__( self , *A__ , **A__ ):
        # NOTE(review): `*A__, **A__` reuses one parameter name for varargs and
        # kwargs — a SyntaxError as written.
        super().__init__(*A__ , **A__ )
        requires_backends(self , """vision""" )
        # Restrict to models that map vision inputs to sequences, per framework.
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def __A ( self , A__=None , A__=None , A__=None ):
        # Split call-time kwargs into (preprocess, forward, postprocess) dicts.
        # Guards against `max_new_tokens` being given both directly and inside
        # `generate_kwargs`.
        A__ : Optional[Any] = {}
        A__ : Any = {}
        if prompt is not None:
            A__ : str = prompt
        if generate_kwargs is not None:
            A__ : Any = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                A__ : Any = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one""" )
            A__ : int = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__( self , A__ , **A__ ):
        # Delegates to the base Pipeline __call__ (batching, params routing).
        return super().__call__(A__ , **A__ )

    def __A ( self , A__ , A__=None ):
        # Preprocess: load the image and, per model family, build the tensors
        # for (optionally prompt-conditioned) generation.
        A__ : List[Any] = load_image(A__ )
        if prompt is not None:
            if not isinstance(A__ , A__ ):
                raise ValueError(
                    F"""Received an invalid text input, got - {type(A__ )} - but expected a single string. """
                    """Note also that one single text can be provided for conditional image to text generation.""" )
            A__ : Any = self.model.config.model_type
            if model_type == "git":
                # GIT: prepend CLS to the tokenized prompt (no special tokens added).
                A__ : int = self.image_processor(images=A__ , return_tensors=self.framework )
                A__ : Optional[int] = self.tokenizer(text=A__ , add_special_tokens=A__ ).input_ids
                A__ : List[str] = [self.tokenizer.cls_token_id] + input_ids
                A__ : List[str] = torch.tensor(A__ ).unsqueeze(0 )
                model_inputs.update({"""input_ids""": input_ids} )
            elif model_type == "pix2struct":
                # Pix2Struct renders the prompt into the image as header text.
                A__ : Any = self.image_processor(images=A__ , header_text=A__ , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                A__ : Dict = self.image_processor(images=A__ , return_tensors=self.framework )
                A__ : List[Any] = self.tokenizer(A__ , return_tensors=self.framework )
                model_inputs.update(A__ )
            else:
                raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
        else:
            A__ : Union[str, Any] = self.image_processor(images=A__ , return_tensors=self.framework )

        if self.model.config.model_type == "git" and prompt is None:
            A__ : List[str] = None
        return model_inputs

    def __A ( self , A__ , A__=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , A__ )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            A__ : Optional[Any] = None

        if generate_kwargs is None:
            A__ : int = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        A__ : List[Any] = model_inputs.pop(self.model.main_input_name )
        A__ : int = self.model.generate(A__ , **A__ , **A__ )
        return model_outputs

    def __A ( self , A__ ):
        # Postprocess: decode each generated sequence into a text record.
        A__ : Optional[Any] = []
        for output_ids in model_outputs:
            A__ : Dict = {
                """generated_text""": self.tokenizer.decode(
                    A__ , skip_special_tokens=A__ , )
            }
            records.append(A__ )
        return records
| 64 |
from functools import lru_cache


@lru_cache
def UpperCamelCase(num: int) -> int:
    """Return num! (memoized via lru_cache; bare-decorator form needs Py>=3.8).

    Fix: the recursive call targeted the nonexistent name `factorial`
    (NameError for num >= 2); it now recurses on the function itself.

    Raises:
        ValueError: if num is negative.
    """
    if num < 0:
        raise ValueError("""Number should not be negative.""")
    return 1 if num in (0, 1) else num * UpperCamelCase(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 1 |
def UpperCamelCase(n: int = 2000000) -> int:
    """Return the sum of all primes strictly below n (Project Euler 10).

    Sieve of Eratosthenes; primality_list[i] == 0 means "i is prime".

    Fixes: the parameter was named `lowercase_` while the body read `n`; the
    inner sieve marked composites with step `n` instead of step `i`; and the
    composite marks / 0,1 exclusions were assigned to dead locals.
    """
    primality_list = [0 for i in range(n + 1)]
    # 0 and 1 are not prime.
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # Mark multiples of the prime i, starting at i*i.
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    # Fix: the original printed `solution()`, which does not exist here.
    print(f'''{UpperCamelCase() = }''')
| 64 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder with a single flat string feature.

    NOTE(review): all three methods below share the name `__A`, so each
    definition shadows the previous one — only the last survives on the class.
    The BeamBasedBuilder API expects distinct hooks (info / split generators /
    pcollection builder); this looks like mechanical renaming damage. Also
    `supervised_keys=A__` reads an undefined name at call time. Code preserved
    byte-for-byte pending reconciliation with upstream.
    """

    def __A ( self ):
        # Dataset metadata: one "content" string column.
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )

    def __A ( self , A__ , A__ ):
        # Single TRAIN split fed from the dummy examples helper.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]

    def __A ( self , A__ , A__ ):
        # Build the Beam PCollection from the in-memory examples.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
    """Beam-based builder variant with a nested sequence feature {"a": {"b": [str]}}.

    NOTE(review): same mechanical damage as the builder above — all three
    methods are named `__A` (later definitions shadow earlier ones) and
    `supervised_keys=A__` reads an undefined name. Preserved byte-for-byte.
    """

    def __A ( self ):
        # Dataset metadata: nested sequence-of-strings column.
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )

    def __A ( self , A__ , A__ ):
        # Single TRAIN split fed from the nested examples helper.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
        ]

    def __A ( self , A__ , A__ ):
        # Build the Beam PCollection from the in-memory examples.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase() -> list:
    """Return three (index, {"content": str}) example pairs.

    Fix: the original `-> Dict` annotation referenced an unimported name and
    was wrong — the function returns a list of tuples.
    """
    return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]
def UpperCamelCase() -> list:
    """Return three (index, {"a": {"b": [str]}}) nested example pairs.

    Fix: the original `-> Tuple` annotation referenced an unimported name and
    was wrong — the function returns a list of tuples.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""])]
class _a (__magic_name__ ):
    """Integration tests for the Beam-based builders above.

    NOTE(review): this class is mechanically mangled — results are assigned to
    the placeholder `A__` while later statements read descriptive names
    (`builder`, `dset`, `expected_num_examples`, `write_parquet_mock`,
    `tmp_cache_dir`), and every test method is named `__A` (each shadows the
    previous; unittest would not discover them under that name anyway). The
    base `__magic_name__` is unresolved. Code preserved byte-for-byte pending
    reconciliation with upstream.
    """

    @require_beam
    def __A ( self ):
        # Happy path: download_and_prepare writes one arrow shard + info json,
        # and as_dataset round-trips the dummy examples.
        A__ : Dict = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : int = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset

    @require_beam
    def __A ( self ):
        # Sharded path: patch WriteToParquet to force num_shards=2 and check
        # both shards exist; order is not preserved, so contents are compared
        # as sorted lists.
        import apache_beam as beam

        A__ : int = beam.io.parquetio.WriteToParquet
        A__ : List[str] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
                A__ : Optional[Any] = partial(A__ , num_shards=2 )
                builder.download_and_prepare()
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
                self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
                A__ : Optional[int] = builder.as_dataset()
                self.assertEqual(dset["""train"""].num_rows , A__ )
                self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
                # Order is not preserved when sharding, so we just check that all the elements are there
                self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
                self.assertTrue(
                    os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
                del dset

    @require_beam
    def __A ( self ):
        # Without a beam_runner the builder must refuse to prepare.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def __A ( self ):
        # Nested-feature variant of the happy path.
        A__ : List[Any] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
| 64 | 1 |
def UpperCamelCase(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Recursively move a tower of `height` disks from from_pole to to_pole.

    Fixes: the obfuscated signature reused `lowercase_` for all four
    parameters (SyntaxError), and the recursive calls targeted the
    nonexistent name `move_tower` with collapsed argument order.
    NOTE(review): argument order restored per the standard hanoi recursion —
    confirm against upstream.
    """
    if height >= 1:
        # Park height-1 disks on the spare pole, move the largest, then
        # bring the parked tower on top of it.
        UpperCamelCase(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        UpperCamelCase(height - 1, with_pole, to_pole, from_pole)
def UpperCamelCase(from_pole: str, to_pole: str) -> None:
    """Print one disk move.

    Fix: both parameters were named `lowercase_` (SyntaxError).
    """
    print("""moving disk from""", from_pole, """to""", to_pole)
def UpperCamelCase() -> None:
    """Prompt for a tower height and print the full sequence of moves.

    Fix: the height was assigned to a dead local `A__` while the call below
    read the undefined `lowercase_`.
    """
    height = int(input("""Height of hanoi: """).strip())
    move_tower(height, """A""", """B""", """C""")


if __name__ == "__main__":
    main()
| 64 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
    """Deprecated feature-extractor shim.

    Emits a deprecation warning and otherwise defers entirely to the
    image-processor base class.
    """

    def __init__(self, *args, **kwargs):
        # Fix: the obfuscated signature `*A__, **A__` reused one name for both
        # varargs and kwargs (a SyntaxError), and the warning category argument
        # was lost. Deprecation-for-removal warnings in this codebase use
        # FutureWarning — NOTE(review): confirm against upstream transformers.
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 64 | 1 |
from __future__ import annotations
from collections import deque
class _a :
'''simple docstring'''
def __init__( self , A__ ):
A__ : list[dict] = []
self.adlist.append(
{"""value""": """""", """next_states""": [], """fail_state""": 0, """output""": []} )
for keyword in keywords:
self.add_keyword(A__ )
self.set_fail_transitions()
def __A ( self , A__ , A__ ):
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def __A ( self , A__ ):
A__ : Any = 0
for character in keyword:
A__ : int = self.find_next_state(A__ , A__ )
if next_state is None:
self.adlist.append(
{
"""value""": character,
"""next_states""": [],
"""fail_state""": 0,
"""output""": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
A__ : List[Any] = len(self.adlist ) - 1
else:
A__ : Optional[int] = next_state
self.adlist[current_state]["output"].append(A__ )
def __A ( self ):
A__ : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(A__ )
A__ : Tuple = 0
while q:
A__ : List[Any] = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(A__ )
A__ : Dict = self.adlist[r]["""fail_state"""]
while (
self.find_next_state(A__ , self.adlist[child]["""value"""] ) is None
and state != 0
):
A__ : Tuple = self.adlist[state]["""fail_state"""]
A__ : Optional[int] = self.find_next_state(
A__ , self.adlist[child]["""value"""] )
if self.adlist[child]["fail_state"] is None:
A__ : int = 0
A__ : Union[str, Any] = (
self.adlist[child]["""output"""]
+ self.adlist[self.adlist[child]["""fail_state"""]]["""output"""]
)
def __A ( self , A__ ):
A__ : dict = {} # returns a dict with keywords and list of its occurrences
A__ : List[str] = 0
for i in range(len(A__ ) ):
while (
self.find_next_state(A__ , string[i] ) is None
and current_state != 0
):
A__ : int = self.adlist[current_state]["""fail_state"""]
A__ : Optional[int] = self.find_next_state(A__ , string[i] )
if next_state is None:
A__ : Optional[int] = 0
else:
A__ : List[Any] = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
A__ : List[Any] = []
result[key].append(i - len(A__ ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase(lowercase_: int = 2, b: int = 3, batch_size: int = 16, n_train_batches: int = 10, n_valid_batches: int = 2) -> tuple:
    """Build (train, valid) DataLoaders over a noisy line y = a*x + b.

    Fixes: all five parameters were named `lowercase_` (SyntaxError); they are
    restored to the names the body reads (the slope keeps the first original
    name for positional compatibility and is rebound to `a` locally).
    NOTE(review): shuffle=True for train / False for valid matches the
    upstream accelerate test — confirm, the obfuscated source is ambiguous.
    """
    a = lowercase_  # slope of the synthetic regression target

    def get_dataset(n_batches):
        # x ~ N(0, 1); target is the line plus small gaussian noise.
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def UpperCamelCase(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Run a quick MSE training loop; returns the random values drawn per step.

    Fixes: all six parameters were named `lowercase_` (SyntaxError); restored
    to the names the body reads. Call sites in this file pass
    (num_epochs, model, dataloader, optimizer, accelerator[, scheduler]).
    """
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
    """Tests for Accelerator state checkpointing (save_state/load_state).

    NOTE(review): this class is mechanically mangled — results are assigned to
    the placeholder `A__` while later statements read descriptive names
    (`model`, `optimizer`, `accelerator`, `scheduler`, `tmpdir`, the
    `(a, b)`/state-dict snapshots, `test_rands`, `message`, `net`), and every
    test method is named `__A` (each shadows the previous; unittest would not
    discover them under that name anyway). Code preserved byte-for-byte
    pending reconciliation with upstream accelerate.
    """

    def __A ( self ):
        # With total_limit=1 and automatic naming, saving twice keeps only one
        # checkpoint directory.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : str = dummy_dataloaders()
            A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : List[str] = Accelerator(project_config=A__ )
            A__ , A__ , A__ , A__ : Any = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )

    def __A ( self ):
        # Round-trip with explicit save paths: load_state must restore model
        # weights and optimizer state exactly, mid-training and at the end.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : str = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : int = dummy_dataloaders()
            # Train baseline
            A__ : str = Accelerator()
            A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            A__ : List[Any] = os.path.join(A__ , """initial""" )
            accelerator.save_state(A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Dict = optimizer.state_dict()
            A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Optional[int] = DummyModel()
            A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Dict = dummy_dataloaders()
            A__ : List[str] = Accelerator()
            A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(A__ )
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : Union[str, Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
            accelerator.save_state(A__ )
            # Load everything back in and make sure all states work
            accelerator.load_state(A__ )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
            A__ : Optional[int] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )

    def __A ( self ):
        # Same round-trip using automatic checkpoint naming under
        # project_dir/checkpoints/checkpoint_N.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : int = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : List[str] = dummy_dataloaders()
            A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : str = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : int = optimizer.state_dict()
            A__ : int = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Dict = DummyModel()
            A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Union[str, Any] = dummy_dataloaders()
            A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
            A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : Tuple = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : str = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : List[Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )

    def __A ( self ):
        # Registering plain tensors for checkpointing must raise, naming the
        # offending item indices in the error message.
        A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
        A__ : int = torch.tensor([2, 3, 4] )
        A__ : List[Any] = DummyModel()
        A__ : List[Any] = torch.optim.Adam(net.parameters() )
        A__ : Tuple = Accelerator()
        with self.assertRaises(A__ ) as ve:
            accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
        A__ : Any = str(ve.exception )
        self.assertTrue("""Item at index 0""" in message )
        self.assertTrue("""Item at index 1""" in message )
        self.assertFalse("""Item at index 2""" in message )
        self.assertFalse("""Item at index 3""" in message )

    def __A ( self ):
        # LR scheduler state must also round-trip through save/load_state.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Any = DummyModel()
            A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
            A__ , A__ : List[Any] = dummy_dataloaders()
            A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            A__ : Tuple = scheduler.state_dict()
            train(3 , A__ , A__ , A__ , A__ , A__ )
            self.assertNotEqual(A__ , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            self.assertEqual(A__ , scheduler.state_dict() )

    def __A ( self ):
        # With total_limit=2, only the two most recent of 11 checkpoints
        # survive on disk.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
            # Train baseline
            A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ : Union[str, Any] = accelerator.prepare(A__ )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )

    @require_cuda
    def __A ( self ):
        # Re-run this module under torchrun across all visible GPUs; the
        # __main__ section below exercises map_location handling.
        A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed smoke test for optimizer map_location handling, run under
    # torchrun by the @require_cuda test above.
    # Fix: every binding below was assigned to the throwaway name A_ while
    # subsequent statements read the descriptive names (savedir, model,
    # optimizer, scheduler, dataloaders, project_config, accelerator,
    # param_device) — all NameErrors. Bindings restored to the names read.
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the intial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 64 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps submodule name -> the public names it provides.
# Fixes: the structure and the optional-dependency name lists were bound to
# throwaway `A_` names (so the vision/torch entries were never added), and the
# _LazyModule instance was discarded instead of being installed in
# sys.modules, while line-final code read the never-defined `_import_structure`.
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): submodule keys follow the transformers convention
    # (module file names) — confirm against upstream.
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_poolformer'] = [
        'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PoolFormerForImageClassification',
        'PoolFormerModel',
        'PoolFormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 64 |
def UpperCamelCase(a: str, b: str) -> bool:
    """Return True if a can be turned into b by capitalizing some of its
    lowercase letters and deleting the remaining lowercase letters.

    dp[i][j] is True when the first i characters of a can produce the first
    j characters of b.

    Fixes: both parameters were named `lowercase_` (SyntaxError), and the two
    dp transitions were assigned to dead locals instead of dp cells.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize a[i] to match b[j] ...
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # ... or delete a[i] if it is lowercase.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Fix: these constants were bound to the throwaway name A_, but later code in
# this file reads MAX_GPU_BATCH_SIZE (gradient-accumulation threshold); the
# second value is the evaluation batch size.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def UpperCamelCase(accelerator: Accelerator, batch_size: int = 16):
    """Build GLUE/MRPC train and eval DataLoaders for the given accelerator.

    Fixes: both parameters were named `lowercase_` (SyntaxError); the loaded
    dataset and tokenized dataset were bound to dead locals so `datasets.map`
    and the rename below read undefined names.
    """
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="""longest""", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="""pt""", )

    # Instantiate dataloaders.
    # NOTE(review): the obfuscated source passed the ambiguous `lowercase_`
    # for the eval loader's shuffle/batch_size; upstream uses shuffle=False
    # and a dedicated EVAL_BATCH_SIZE — confirm against the accelerate example.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Fix: the mock was bound to the throwaway name A_, so it never replaced
    # get_dataloaders (which the training function calls).
    get_dataloaders = mocked_dataloaders  # noqa: F811
def UpperCamelCase (lowercase_: Any , lowercase_: Dict ) -> Union[str, Any]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase_ ) == "1":
A__ : int = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
A__ : List[str] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir )
else:
A__ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ : Tuple = config["""lr"""]
A__ : List[str] = int(config["""num_epochs"""] )
A__ : Dict = int(config["""seed"""] )
A__ : Dict = int(config["""batch_size"""] )
set_seed(lowercase_ )
A__ , A__ : List[str] = get_dataloaders(lowercase_ , lowercase_ )
A__ : Any = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
A__ : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A__ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
A__ : Union[str, Any] = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
A__ : int = AdamW(params=model.parameters() , lr=lowercase_ )
# Instantiate scheduler
A__ : Tuple = get_linear_schedule_with_warmup(
optimizer=lowercase_ , num_warmup_steps=100 , num_training_steps=(len(lowercase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ , A__ , A__ , A__ , A__ : Dict = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
A__ : List[Any] = os.path.split(lowercase_ )[-1].split(""".""" )[0]
accelerator.init_trackers(lowercase_ , lowercase_ )
# Now we train the model
for epoch in range(lowercase_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
A__ : Any = 0
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A__ : str = model(**lowercase_ )
A__ : Optional[Any] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
A__ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(lowercase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
A__ : List[str] = model(**lowercase_ )
A__ : Union[str, Any] = outputs.logits.argmax(dim=-1 )
A__ , A__ : Optional[int] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase_ , references=lowercase_ , )
A__ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"""accuracy""": eval_metric["""accuracy"""],
"""f1""": eval_metric["""f1"""],
"""train_loss""": total_loss.item() / len(lowercase_ ),
"""epoch""": epoch,
} , step=lowercase_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def UpperCamelCase () -> str:
A__ : str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase_ , default=lowercase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
parser.add_argument(
"""--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , )
parser.add_argument(
"""--project_dir""" , type=lowercase_ , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , )
A__ : Optional[int] = parser.parse_args()
A__ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase_ , lowercase_ )
if __name__ == "__main__":
main()
| 64 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
def UpperCamelCase (lowercase_: Tuple , lowercase_: Tuple=1.0 , lowercase_: Dict=None , lowercase_: int=None ) -> str:
if rng is None:
A__ : Optional[Any] = global_rng
A__ : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
A__ : Any = parent
A__ : Optional[int] = batch_size
A__ : Union[str, Any] = min_seq_length
A__ : Dict = max_seq_length
A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : str = feature_size
A__ : Optional[int] = padding_value
A__ : List[str] = sampling_rate
A__ : List[str] = return_attention_mask
A__ : int = do_normalize
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: int = ASTFeatureExtractor
def __A ( self ):
A__ : Optional[Any] = ASTFeatureExtractionTester(self )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
# Test not batched input
A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test batched
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ : List[str] = np.asarray(A__ )
A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
@require_torch
def __A ( self ):
import torch
A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
A__ : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __A ( self , A__ ):
from datasets import load_dataset
A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def __A ( self ):
# fmt: off
A__ : Optional[Any] = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
A__ : Any = self._load_datasamples(1 )
A__ : Tuple = ASTFeatureExtractor()
A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 1 |
def UpperCamelCase (lowercase_: List[Any] , lowercase_: List[Any] , lowercase_: str , lowercase_: Union[str, Any] , lowercase_: List[Any] , lowercase_: Dict ) -> Any:
if index == r:
for j in range(lowercase_ ):
print(data[j] , end=""" """ )
print(""" """ )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
A__ : Optional[Any] = arr[i]
combination_util(lowercase_ , lowercase_ , lowercase_ , index + 1 , lowercase_ , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Optional[Any] , lowercase_: Any ) -> str:
# A temporary array to store all combination one by one
A__ : Optional[int] = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(lowercase_ , lowercase_ , lowercase_ , 0 , lowercase_ , 0 )
if __name__ == "__main__":
# Driver code to check the function above
A_ : Optional[Any] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 64 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
super().__init__()
A__ : Union[str, Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
A__ : str = prefix_inner_dim
A__ : Optional[Any] = prefix_hidden_dim
A__ : Tuple = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
A__ : int = (
nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
A__ : Tuple = GPTaConfig(
vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
A__ : int = GPTaLMHeadModel(A__ )
def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
A__ : List[str] = self.transformer.transformer.wte(A__ )
A__ : int = self.encode_prefix(A__ )
A__ : int = self.decode_prefix(A__ )
A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __A ( self , A__ , A__ ):
return torch.zeros(A__ , self.prefix_length , dtype=torch.intaa , device=A__ )
def __A ( self , A__ ):
return self.encode_prefix(A__ )
@torch.no_grad()
def __A ( self , A__ , A__ , A__ ):
A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
A__ : Optional[int] = []
A__ : str = []
for feature in features:
A__ : Dict = self.decode_prefix(feature.to(A__ ) ) # back to the clip feature
# Only support beam search for now
A__ , A__ : Union[str, Any] = self.generate_beam(
input_embeds=A__ , device=A__ , eos_token_id=A__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A__ : int = torch.stack(A__ )
A__ : List[Any] = torch.stack(A__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
A__ : Any = eos_token_id
A__ : Any = None
A__ : Optional[int] = None
A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
if input_embeds is not None:
A__ : Dict = input_embeds
else:
A__ : str = self.transformer.transformer.wte(A__ )
for i in range(A__ ):
A__ : Dict = self.transformer(inputs_embeds=A__ )
A__ : str = outputs.logits
A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A__ : Any = logits.softmax(-1 ).log()
if scores is None:
A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A__ : Optional[Any] = next_tokens
else:
A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
else:
A__ : Optional[int] = -float(np.inf )
A__ : List[Any] = 0
A__ : str = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A__ : Dict = scores_sum / seq_lengths[:, None]
A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
A__ : Tuple = next_tokens // scores_sum.shape[1]
A__ : Optional[Any] = seq_lengths[next_tokens_source]
A__ : List[str] = next_tokens % scores_sum.shape[1]
A__ : Optional[int] = next_tokens.unsqueeze(1 )
A__ : int = tokens[next_tokens_source]
A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
A__ : str = generated[next_tokens_source]
A__ : Optional[Any] = scores_sum_average * seq_lengths
A__ : Union[str, Any] = is_stopped[next_tokens_source]
A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
if is_stopped.all():
break
A__ : Dict = scores / seq_lengths
A__ : Dict = scores.argsort(descending=A__ )
# tokens tensors are already padded to max_seq_length
A__ : Union[str, Any] = [tokens[i] for i in order]
A__ : Any = torch.stack(A__ , dim=0 )
A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 64 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self , A__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A__ : str = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A__ )
def __A ( self ):
A__ : Dict = """sshleifer/tiny-gpt2"""
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ )
A__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Dict = """sgugger/tiny-distilbert-classification"""
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Any = """sshleifer/tiny-gpt2"""
A__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Tuple = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Tuple = AutoConfig.from_pretrained(A__ )
# set architectures equal to `None`
A__ : List[Any] = None
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Any = PyTorchBenchmark(A__ )
A__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : List[str] = """sshleifer/tinier_bart"""
A__ : List[str] = AutoConfig.from_pretrained(A__ )
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : Dict = """sshleifer/tinier_bart"""
A__ : int = AutoConfig.from_pretrained(A__ )
A__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
A__ : Optional[Any] = PyTorchBenchmark(A__ )
benchmark.run()
self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A__ ):
self.assertTrue(hasattr(A__ , """sequential""" ) )
self.assertTrue(hasattr(A__ , """cumulative""" ) )
self.assertTrue(hasattr(A__ , """current""" ) )
self.assertTrue(hasattr(A__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
| 64 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A_ : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a (datasets.BuilderConfig ):
'''simple docstring'''
UpperCAmelCase__: Optional[datasets.Features] = None
UpperCAmelCase__: str = "utf-8"
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: bool = True # deprecated
UpperCAmelCase__: Optional[int] = None # deprecated
UpperCAmelCase__: int = 10 << 20 # 10MB
UpperCAmelCase__: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
'''simple docstring'''
UpperCAmelCase__: List[str] = JsonConfig
def __A ( self ):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
A__ : Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , A__ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
A__ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A__ , (str, list, tuple) ):
A__ : Optional[Any] = data_files
if isinstance(A__ , A__ ):
A__ : List[str] = [files]
A__ : int = [dl_manager.iter_files(A__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A__ : List[str] = []
for split_name, files in data_files.items():
if isinstance(A__ , A__ ):
A__ : Optional[int] = [files]
A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , A__ ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
return pa_table
def __A ( self , A__ ):
for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
# We keep only the field we are interested in
A__ : Optional[int] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(A__ , (list, tuple) ):
A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
else:
A__ : Any = dataset
A__ : Any = pa.Table.from_pydict(A__ )
yield file_idx, self._cast_table(A__ )
# If the file has one json object per line
else:
with open(A__ , """rb""" ) as f:
A__ : List[str] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
A__ : Any = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
A__ : Dict = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(A__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
try:
while True:
try:
A__ : str = paj.read_json(
io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(A__ , pa.ArrowInvalid )
and "straddling" not in str(A__ )
or block_size > len(A__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
try:
A__ : str = set().union(*[row.keys() for row in dataset] )
A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
A__ : int = pa.Table.from_pydict(A__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(A__ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A__ )
batch_idx += 1
| 64 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
# Emit INFO-level progress messages during conversion; module-level logger handle.
logging.set_verbosity_info()
A_ : Tuple = logging.get_logger(__name__)
def UpperCamelCase (lowercase_: str ) -> Any:
    """Build a ``DPTConfig`` matching the architecture encoded in the checkpoint URL.

    Args:
        lowercase_: URL of the original DPT checkpoint (e.g. the MiDaS large weights).

    Returns:
        ``(config, expected_shape)`` where ``expected_shape`` is the model's output
        shape for the verification image (384px depth / 480px ADE segmentation).
    """
    checkpoint_url = lowercase_
    config = DPTConfig()
    if "large" in checkpoint_url:
        # ViT-Large backbone hyper-parameters.
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        # Checkpoint fine-tuned on ADE20k semantic segmentation (150 classes).
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        idalabel = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        expected_shape = [1, 150, 480, 480]
    # NOTE(review): `expected_shape` is unbound for a URL containing neither
    # "large" nor "ade" — presumably only those checkpoints are supported; confirm.
    return config, expected_shape
def UpperCamelCase (lowercase_: dict ) -> None:
    """Drop the original backbone's classification head from ``lowercase_`` in place.

    The timm pre-training head is not part of the converted DPT model, so its
    weights must be removed before ``load_state_dict``.
    """
    state_dict = lowercase_
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        # ``None`` default: keys that are already absent are silently ignored.
        state_dict.pop(k, None)
def UpperCamelCase (lowercase_: str ) -> str:
    """Map a parameter name of the original DPT checkpoint to its HF Transformers name.

    The replacements are applied sequentially, so each rule sees the result of the
    previous one (order matters: e.g. ``scratch`` must be renamed before ``refinenet``).
    """
    name = lowercase_
    # --- ViT backbone -------------------------------------------------------
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        # cls_token / pos_embed / patch_embed live in the embeddings module.
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    # --- neck / fusion stage ------------------------------------------------
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # --- readout blocks -----------------------------------------------------
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # --- resize blocks ------------------------------------------------------
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def UpperCamelCase (state_dict: dict , config ) -> None:
    """Split each fused timm ``qkv`` projection into separate query/key/value entries.

    Pops ``dpt.encoder.layer.{i}.attn.qkv.{weight,bias}`` from ``state_dict`` and
    writes ``dpt.encoder.layer.{i}.attention.attention.{query,key,value}.{weight,bias}``
    back into it, in place.

    Args:
        state_dict: checkpoint state dict (already renamed to HF layout).
        config: model config providing ``num_hidden_layers`` and ``hidden_size``.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase () -> "Image.Image":
    """Download the standard COCO verification image (two cats on a couch).

    Used to sanity-check the converted model's outputs against reference logits.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read directly from the response body.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def UpperCamelCase (checkpoint_url: str , pytorch_dump_folder_path: str , push_to_hub: bool , model_name: str ) -> None:
    """Convert an original DPT checkpoint to the HF Transformers format.

    Downloads the original weights, renames/reshapes them, verifies the converted
    model's outputs on a reference image, saves model + image processor, and
    optionally pushes both to the Hub.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # BUG FIX: the renamed key was previously discarded, so the state dict
        # never matched the HF model. Route every key through rename_key.
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits against reference slices computed with the original implementation.
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    # BUG FIX: the parser and parsed args were previously bound to throwaway
    # names (`A_`), so `parser.add_argument(...)` raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
        type=str,
        help='URL of the original DPT checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    parser.add_argument(
        '--model_name',
        default='dpt-large',
        type=str,
        help='Name of the model, in case you\'re pushing to the hub.',
    )
    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 64 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
# BUG FIX: each list below was assigned to a throwaway `A_` name but then read
# under its real name (`upper_files`, `space_files`, ...), raising NameError.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Filenames should be lowercase for cross-platform consistency.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

# Spaces in paths break tooling; report them.
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

# Hyphens are not importable as Python module names.
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

# Every file should live inside a topic directory.
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Non-zero exit code = number of offending files, for CI.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 64 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# BUG FIX: these module-level constants were assigned to throwaway `A_` names,
# while the tokenizer class below reads them as `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
logger = logging.get_logger(__name__)

# Name of the SentencePiece model file inside a checkpoint directory.
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

# Hub URL of the vocab file for each published GPT-SW3 checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
    }
}

# Maximum model input length (positional embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'AI-Sweden/gpt-sw3-126m': 2048,
    'AI-Sweden/gpt-sw3-350m': 2048,
    'AI-Sweden/gpt-sw3-1.6b': 2048,
    'AI-Sweden/gpt-sw3-6.7b': 2048,
    'AI-Sweden/gpt-sw3-20b': 2048,
}
class _a (__magic_name__ ):
    """SentencePiece-based tokenizer for the GPT-SW3 model family.

    Wraps a ``sentencepiece`` model file (``spiece.model``) and adds GPT-SW3
    specific preprocessing (non-printing-character removal, whitespace and NFC
    normalization) plus a chat-style prompt builder for Conversation inputs.

    NOTE(review): an obfuscation pass collapsed most parameter/local names to
    ``A__``; several signatures repeat the same parameter name and some reads
    (e.g. ``kwargs``, ``eos_token``) refer to names no longer bound. Comments
    describe the apparent intent — confirm against the upstream GPT-SW3
    tokenizer before relying on them.
    """

    # Class-level tokenizer configuration (all bound to the same mangled name).
    UpperCAmelCase__: int = VOCAB_FILES_NAMES
    UpperCAmelCase__: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__: Union[str, Any] = ['''input_ids''', '''attention_mask''']

    def __init__( self , A__ , A__=False , A__=False , A__=False , A__=None , A__=None , A__=None , A__=None , A__ = None , **A__ , ):
        """Load the SentencePiece model and resolve the special tokens.

        Positionally the parameters appear to be: vocab_file, do_lower_case,
        remove_space, keep_accents, pad_token, unk_token, eos_token, bos_token,
        sp_model_kwargs — TODO confirm ordering against the upstream signature.
        """
        A__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        A__ : str = kwargs.get("""name_or_path""" )
        if name_or_path is None:
            logger.warning(
                """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
                """ you are testing the model, this can safely be ignored""" )
            A__ : Optional[Any] = """None"""
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        A__ : List[Any] = """<|endoftext|>""" if eos_token is None else eos_token
        A__ : Optional[Any] = """<unk>""" if unk_token is None else unk_token
        # The 7b checkpoint was trained without dedicated pad/bos tokens; reuse unk/eos.
        if "gpt-sw3-7b" in name_or_path:
            A__ : Dict = unk_token if pad_token is None else pad_token
            A__ : Union[str, Any] = eos_token if bos_token is None else bos_token
        else:
            A__ : Optional[int] = """<pad>""" if pad_token is None else pad_token
            A__ : int = """<s>""" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=A__ , remove_space=A__ , keep_accents=A__ , bos_token=A__ , eos_token=A__ , unk_token=A__ , pad_token=A__ , sp_model_kwargs=self.sp_model_kwargs , **A__ , )
        A__ : Union[str, Any] = do_lower_case
        A__ : int = remove_space
        A__ : Any = keep_accents
        A__ : Dict = vocab_file
        A__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(A__ )
        # Used for whitespace normalization in input texts
        # fmt : off
        A__ : Union[str, Any] = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        A__ : int = re.compile(
            F"""[{''.join(map(A__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]""" )

    def __getstate__( self ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        A__ : Tuple = self.__dict__.copy()
        A__ : Union[str, Any] = None
        return state

    def __setstate__( self , A__ ):
        """Restore state and re-load the SentencePiece model after unpickling."""
        A__ : List[str] = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            A__ : List[str] = {}
        A__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def __A ( self ):
        # Vocabulary size == number of pieces in the SentencePiece model.
        return len(self.sp_model )

    def __A ( self , A__ ):
        """Normalize raw text before SentencePiece encoding."""
        # Strip non-printing (control) characters.
        A__ : Any = self.non_printing_characters_re.sub("""""" , A__ )
        # Normalize whitespaces
        A__ : Any = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
        # NFC Unicode normalization
        A__ : Tuple = unicodedata.normalize("""NFC""" , A__ )
        return text

    def __A ( self , A__ , **A__ ):
        """Tokenize: preprocess the text, then encode with SentencePiece."""
        A__ : Tuple = self.preprocess_text(A__ )
        return self.sp_model.encode(A__ , out_type=A__ )

    def __A ( self , A__ ):
        """Convert a token (piece) to its vocabulary id."""
        return self.sp_model.PieceToId(A__ )

    def __A ( self , A__ ):
        """Convert a vocabulary id back to its token (piece)."""
        return self.sp_model.IdToPiece(A__ )

    @staticmethod
    def __A ( A__ ):
        """Identity pass-through for an already-decoded string."""
        return out_string

    def __A ( self , A__ ):
        """Convert a token sequence to a string, emitting special tokens verbatim."""
        A__ : int = []
        A__ : Dict = """"""
        A__ : List[str] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(A__ ) + token
                A__ : Optional[int] = True
                A__ : Optional[Any] = []
            else:
                current_sub_tokens.append(A__ )
                A__ : List[Any] = False
        out_string += self.sp_model.decode(A__ )
        return out_string

    def __A ( self ):
        """Return the full vocabulary (token -> id), including added tokens."""
        A__ : List[Any] = {self.convert_ids_to_tokens(A__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __A ( self , A__ , A__ = None ):
        """Save the SentencePiece model file into ``save_directory``.

        Copies the original file when it exists on disk, otherwise serializes
        the in-memory model. Returns a 1-tuple with the written path.
        """
        if not os.path.isdir(A__ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        A__ : Union[str, Any] = os.path.join(
            A__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(A__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , A__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(A__ , """wb""" ) as fi:
                A__ : Optional[Any] = self.sp_model.serialized_model_proto()
                fi.write(A__ )
        return (out_vocab_file,)

    def __A ( self , A__ , A__ = False ):
        """Fast encode: text (or list of texts) straight to ids, optionally as a torch tensor."""
        if isinstance(A__ , A__ ):
            A__ : Optional[Any] = self.preprocess_text(A__ )
            A__ : Optional[int] = self.sp_model.encode(A__ )
        else:
            A__ : Union[str, Any] = [self.preprocess_text(A__ ) for t in text]
            A__ : int = self.sp_model.encode(A__ )
        if return_tensors is True or return_tensors == "pt":
            A__ : Union[str, Any] = torch.tensor(A__ )
        return token_ids

    def __A ( self , A__ ):
        """Fast decode: ids straight back to text via SentencePiece."""
        return self.sp_model.decode(A__ )

    def __A ( self , A__ ):
        """Build a chat prompt ("User:"/"Bot:" turns) from a Conversation and encode it."""
        A__ : List[Any] = [F"""User: {text}""" if is_user else F"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
        A__ : List[str] = (
            F"""{self.eos_token}{self.bos_token}""" + F"""{self.bos_token}""".join(A__ ) + F"""{self.bos_token}Bot:"""
        )
        return self.encode(text=A__ )
| 64 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCamelCase (*args , take_from: Optional[Union[Dict, Any]] = None , standard_warn: bool = True , stacklevel: int = 2 ):
    """Emit deprecation warnings for removed arguments/attributes and collect their values.

    Each positional argument is an ``(attribute, version_name, message)`` tuple.
    If ``take_from`` is a dict of kwargs, the deprecated key is popped from it;
    if it is an object, the attribute is read from it; if it is ``None``, only
    a generic warning is emitted.

    Raises:
        ValueError: if the library version has already reached ``version_name``
            (the deprecation shim should have been deleted by then).
        TypeError: if ``take_from`` is a dict with leftover (unexpected) keys.

    Returns:
        Nothing, a single collected value, or a tuple of collected values.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow calling with a single bare triple instead of a tuple of triples.
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}" )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            # standard_warn=False lets callers provide a fully custom message.
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        # Report the caller's location for any kwarg that was not consumed.
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 64 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
    """Processor for Bark TTS: wraps an ``AutoTokenizer`` and manages optional
    speaker-embedding "voice presets" loaded from / saved to the Hub or disk.

    NOTE(review): parameter/local names were mangled to ``A__`` by an
    obfuscation pass, so several reads (e.g. ``speaker_embeddings``, ``kwargs``)
    refer to names that are no longer bound. Comments describe the apparent
    intent — verify against the upstream Bark processor before relying on them.
    """

    UpperCAmelCase__: Any = '''AutoTokenizer'''
    UpperCAmelCase__: List[str] = ['''tokenizer''']
    # Expected ndim of each voice-preset prompt array.
    UpperCAmelCase__: str = {
        '''semantic_prompt''': 1,
        '''coarse_prompt''': 2,
        '''fine_prompt''': 2,
    }

    def __init__( self , A__ , A__=None ):
        """Store the tokenizer and the (optional) preloaded speaker-embeddings index."""
        super().__init__(A__ )
        A__ : str = speaker_embeddings

    @classmethod
    def __A ( cls , A__ , A__="speaker_embeddings_path.json" , **A__ ):
        """Instantiate from a pretrained repo, optionally loading the speaker-embeddings JSON index."""
        if speaker_embeddings_dict_path is not None:
            A__ : Union[str, Any] = get_file_from_repo(
                A__ , A__ , subfolder=kwargs.pop("""subfolder""" , A__ ) , cache_dir=kwargs.pop("""cache_dir""" , A__ ) , force_download=kwargs.pop("""force_download""" , A__ ) , proxies=kwargs.pop("""proxies""" , A__ ) , resume_download=kwargs.pop("""resume_download""" , A__ ) , local_files_only=kwargs.pop("""local_files_only""" , A__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , A__ ) , revision=kwargs.pop("""revision""" , A__ ) , )
            if speaker_embeddings_path is None:
                # Missing index file is not fatal: fall back to no preloaded embeddings.
                logger.warning(
                    F"""`{os.path.join(A__ , A__ )}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
                A__ : Optional[Any] = None
            else:
                with open(A__ ) as speaker_embeddings_json:
                    A__ : List[Any] = json.load(A__ )
        else:
            A__ : List[Any] = None
        A__ : Dict = AutoTokenizer.from_pretrained(A__ , **A__ )
        return cls(tokenizer=A__ , speaker_embeddings=A__ )

    def __A ( self , A__ , A__="speaker_embeddings_path.json" , A__="speaker_embeddings" , A__ = False , **A__ , ):
        """Save the processor; if speaker embeddings are loaded, dump each preset's
        arrays as ``.npy`` files plus a JSON index mapping preset -> file paths."""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(A__ , A__ , """v2""" ) , exist_ok=A__ )
            A__ : List[str] = {}
            A__ : Dict = save_directory
            for prompt_key in self.speaker_embeddings:
                # "repo_or_path" is index metadata, not a preset name.
                if prompt_key != "repo_or_path":
                    A__ : Union[str, Any] = self._load_voice_preset(A__ )
                    A__ : Any = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["""repo_or_path"""] , A__ , F"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=A__ , )
                        A__ : Tuple = os.path.join(A__ , F"""{prompt_key}_{key}.npy""" )
                    A__ : List[Any] = tmp_dict
            with open(os.path.join(A__ , A__ ) , """w""" ) as fp:
                json.dump(A__ , A__ )
        super().save_pretrained(A__ , A__ , **A__ )

    def __A ( self , A__ = None , **A__ ):
        """Load one named voice preset's ``.npy`` arrays via the recorded repo/path."""
        A__ : Optional[int] = self.speaker_embeddings[voice_preset]
        A__ : Union[str, Any] = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
            A__ : List[str] = get_file_from_repo(
                self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , A__ ) , cache_dir=kwargs.pop("""cache_dir""" , A__ ) , force_download=kwargs.pop("""force_download""" , A__ ) , proxies=kwargs.pop("""proxies""" , A__ ) , resume_download=kwargs.pop("""resume_download""" , A__ ) , local_files_only=kwargs.pop("""local_files_only""" , A__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , A__ ) , revision=kwargs.pop("""revision""" , A__ ) , )
            if path is None:
                raise ValueError(
                    F"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.""" )
            A__ : Tuple = np.load(A__ )
        return voice_preset_dict

    def __A ( self , A__ = None ):
        """Validate that a voice-preset dict has all three prompts with the expected ndim."""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F"""Voice preset unrecognized, missing {key} as a key.""" )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )

    def __call__( self , A__=None , A__=None , A__="pt" , A__=256 , A__=False , A__=True , A__=False , **A__ , ):
        """Tokenize ``text`` and resolve/validate an optional voice preset; the
        preset is attached to the returned encoding as ``history_prompt``."""
        if voice_preset is not None and not isinstance(A__ , A__ ):
            if (
                isinstance(A__ , A__ )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                A__ : Union[str, Any] = self._load_voice_preset(A__ )
            else:
                # Otherwise treat the preset as a path to an .npz archive on disk.
                if isinstance(A__ , A__ ) and not voice_preset.endswith(""".npz""" ):
                    A__ : str = voice_preset + """.npz"""
                A__ : str = np.load(A__ )
        if voice_preset is not None:
            self._validate_voice_preset_dict(A__ , **A__ )
            A__ : Tuple = BatchFeature(data=A__ , tensor_type=A__ )
        A__ : Tuple = self.tokenizer(
            A__ , return_tensors=A__ , padding="""max_length""" , max_length=A__ , return_attention_mask=A__ , return_token_type_ids=A__ , add_special_tokens=A__ , **A__ , )
        if voice_preset is not None:
            A__ : Any = voice_preset
        return encoded_text
| 64 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCamelCase (flax_key_tuple: tuple , flax_tensor ) -> tuple:
    """Rename a flax parameter ``(key_tuple, tensor)`` pair to its PyTorch form.

    ``kernel`` entries become ``weight`` and are transposed to PyTorch layout;
    ``scale``/``embedding`` entries are renamed without reshaping.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: (num_experts, in, out) -> (num_experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer: transpose the 2-D kernel
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def UpperCamelCase (layer: str , checkpoint_info: dict , switch_checkpoint_path: str ):
    """Split a flattened T5X checkpoint key into its real layer name, leaf key(s)
    and the content that should be stored for it.

    ``metadata``/``kvstore`` sub-keys are tensorstore bookkeeping; ``kvstore/path``
    entries are rewritten to absolute paths under ``switch_checkpoint_path`` and
    ``kvstore/driver`` is forced to the local ``file`` driver.

    Returns:
        ``(curr_real_layer_name, split_layer, content)``.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        # Checkpoints are read locally, regardless of the driver recorded upstream.
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def UpperCamelCase (current_block: dict , save_path: str ) -> None:
    """Rename a shard's flax-style keys to Transformers names and save it with torch.

    BUG FIX: the renamed key was previously discarded, so every shard was saved
    with its original keys. Flax scopes are '/'-separated; PyTorch state dicts
    use '.'-separated names.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    torch.save(new_current_block, save_path)
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[Any] , lowercase_: Optional[Any] , lowercase_: Optional[int] , lowercase_: str = WEIGHTS_NAME ) -> Tuple:
    """Stream a T5X/flax Switch-Transformers checkpoint out of tensorstore and
    re-shard it into PyTorch ``.bin`` files of at most ``max_shard_size``,
    writing a ``weight_map`` index alongside.

    NOTE(review): all five parameters were mangled to ``lowercase_``
    (a duplicate-name SyntaxError as written); positionally, per the CLI call
    below, they are (switch_checkpoint_path, dump_path, max_shard_size, dtype,
    weights_name). Most locals are likewise bound to ``A__`` but read under
    their real names — confirm against the upstream conversion script.
    """
    A__ : Optional[int] = convert_file_size_to_int(lowercase_ )
    A__ : List[Any] = []
    A__ : int = {}
    A__ : List[str] = 0
    A__ : Any = 0
    os.makedirs(lowercase_ , exist_ok=lowercase_ )
    # The `checkpoint` msgpack file holds the tree of tensorstore references.
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
        A__ : Optional[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
        A__ : Dict = flatten_dict(lowercase_ , sep="""/""" )
    A__ : Any = {}
    # Group the flattened tensorstore bookkeeping entries per real layer.
    for layer in checkpoint_info.keys():
        A__ , A__ , A__ : Union[str, Any] = get_key_and_tensorstore_dict(
            lowercase_ , lowercase_ , lowercase_ )
        if curr_real_layer_name in all_layers:
            A__ : Optional[int] = content
        else:
            A__ : List[Any] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        A__ : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        A__ : List[Any] = torch.tensor(lowercase_ )
        A__ : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        A__ , A__ : Any = rename_base_flax_keys(tuple(key.split("""/""" ) ) , lowercase_ )
        A__ : Any = """/""".join(lowercase_ )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            A__ : List[Any] = os.path.join(
                lowercase_ , weights_name.replace(""".bin""" , F"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(lowercase_ , lowercase_ )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            A__ : Any = {}
            A__ : str = 0
        A__ : List[str] = raw_weights.to(getattr(lowercase_ , lowercase_ ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    A__ : Union[str, Any] = os.path.join(lowercase_ , weights_name.replace(""".bin""" , F"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(lowercase_ , lowercase_ )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(lowercase_ ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    A__ : str = {}
    A__ : Any = {}
    for idx, shard in enumerate(lowercase_ ):
        # Re-number the placeholder "-of-???" names now that the total is known.
        A__ : Any = weights_name.replace(
            """.bin""" , F"""-{idx+1:05d}-of-{len(lowercase_ ):05d}.bin""" )  # len(sharded_state_dicts):05d}
        A__ : Dict = os.path.join(lowercase_ , weights_name.replace(""".bin""" , F"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
        A__ : str = shard
        for key in shard:
            A__ : Any = shard_file
    # Add the metadata
    A__ : Tuple = {"""total_size""": total_size}
    A__ : Union[str, Any] = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(lowercase_ , lowercase_ ) , """w""" , encoding="""utf-8""" ) as f:
        A__ : Dict = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + """\n"""
        f.write(lowercase_ )
    return metadata, index
if __name__ == "__main__":
    # BUG FIX: the parser/args were previously bound to a throwaway `A_` name.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    # BUG FIX: `--switch_t5x_checkpoint_path` creates the attribute
    # `switch_t5x_checkpoint_path`; it was accessed as `switch_tax_checkpoint_path`.
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def UpperCamelCase () -> None:
    """Manual sanity check (debug helper, not called by the CLI).

    Saves a switch-base-8 config, loads the converted model from disk and
    generates from a masked prompt, printing the decoded output.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto")
    tokenizer = TaTokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def UpperCamelCase (lowercase_ ) -> None:
    """Freeze *lowercase_* (a ``torch.nn.Module``): disable gradients for all
    of its parameters in place.  Returns nothing (the original ``List[str]``
    annotation was wrong)."""
    for param in lowercase_.parameters():
        # The original body assigned a throwaway local instead of touching
        # the parameter — this is the intended effect.
        param.requires_grad = False
def UpperCamelCase () -> str:
    """Pick the best available torch device string.

    Preference order: MPS (with a loud warning), then CUDA, then CPU.
    Returns one of ``"mps"``, ``"cuda"`` or ``"cpu"``.
    """
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = """mps"""
    if device == "mps":
        print(
            """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
            """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
            """ with generations.""" )
    return device
def UpperCamelCase (lowercase_ ) -> None:
    """Display image *lowercase_* with matplotlib, hiding both axes."""
    fig = plt.imshow(lowercase_ )
    # Hide the tick axes; the original passed the image itself instead of
    # the intended ``False``.
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def UpperCamelCase () -> str:
    """Return the current local time formatted as ``HH:MM:SS``."""
    current_time = datetime.now()
    timestamp = current_time.strftime("""%H:%M:%S""" )
    return timestamp
| 64 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy import structure for the BartPho tokenizer package.  The annotations
# on the original `A_` bindings referenced names (`Optional`, ...) that are
# never imported here, and `_import_structure` — consumed by `_LazyModule`
# below — was never defined; both fixed.
A_ = {}
_import_structure = A_

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional; without it the tokenizer is simply not exposed.
    pass
else:
    A_ = ['BartphoTokenizer']
    _import_structure['tokenization_bartpho'] = A_

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    A_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _a (__magic_name__ , unittest.TestCase ):
    """Test suite for the MobileBERT slow and fast tokenizers.

    NOTE(review): local names in this file look machine-mangled — results are
    bound to ``A__`` but later read under their original names (e.g.
    ``self.vocab_file`` below) — confirm individual statements against the
    upstream test before relying on them.
    """

    # Tokenizer classes under test and harness flags.  NOTE(review): every
    # class attribute is bound to the same mangled name, so later bindings
    # overwrite earlier ones — confirm against the upstream attribute names.
    UpperCAmelCase__: Any = MobileBertTokenizer
    UpperCAmelCase__: Any = MobileBertTokenizerFast
    UpperCAmelCase__: int = True
    UpperCAmelCase__: int = True
    UpperCAmelCase__: Dict = filter_non_english
    UpperCAmelCase__: Union[str, Any] = '''google/mobilebert-uncased'''

    def __A ( self ):
        # Write a tiny WordPiece vocabulary into the temp dir so tokenizers
        # can be instantiated without downloading anything.
        super().setUp()
        A__ : Optional[int] = [
            """[UNK]""",
            """[CLS]""",
            """[SEP]""",
            """[PAD]""",
            """[MASK]""",
            """want""",
            """##want""",
            """##ed""",
            """wa""",
            """un""",
            """runn""",
            """##ing""",
            """,""",
            """low""",
            """lowest""",
        ]
        A__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        A__ : Any = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def __A ( self , A__ ):
        # Input text and the output expected after (lower-cased) tokenization.
        A__ : int = """UNwant\u00E9d,running"""
        A__ : List[str] = """unwanted, running"""
        return input_text, output_text

    def __A ( self ):
        # End-to-end: tokenize and map tokens to ids with the slow tokenizer.
        A__ : Tuple = self.tokenizer_class(self.vocab_file )
        A__ : str = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(A__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [9, 6, 7, 12, 10, 11] )

    def __A ( self ):
        # Slow and fast tokenizers must agree on tokens and ids, with and
        # without lower casing.
        if not self.test_rust_tokenizer:
            return
        A__ : List[str] = self.get_tokenizer()
        A__ : int = self.get_rust_tokenizer()
        A__ : Optional[Any] = """UNwant\u00E9d,running"""
        A__ : Optional[Any] = tokenizer.tokenize(A__ )
        A__ : str = rust_tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        A__ : List[str] = tokenizer.encode(A__ , add_special_tokens=A__ )
        A__ : Dict = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
        self.assertListEqual(A__ , A__ )
        A__ : Optional[Any] = self.get_rust_tokenizer()
        A__ : Optional[Any] = tokenizer.encode(A__ )
        A__ : List[str] = rust_tokenizer.encode(A__ )
        self.assertListEqual(A__ , A__ )
        # With lower casing
        A__ : List[str] = self.get_tokenizer(do_lower_case=A__ )
        A__ : List[str] = self.get_rust_tokenizer(do_lower_case=A__ )
        A__ : Dict = """UNwant\u00E9d,running"""
        A__ : str = tokenizer.tokenize(A__ )
        A__ : int = rust_tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        A__ : str = tokenizer.encode(A__ , add_special_tokens=A__ )
        A__ : int = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
        self.assertListEqual(A__ , A__ )
        A__ : List[Any] = self.get_rust_tokenizer()
        A__ : Optional[Any] = tokenizer.encode(A__ )
        A__ : Optional[int] = rust_tokenizer.encode(A__ )
        self.assertListEqual(A__ , A__ )

    def __A ( self ):
        # CJK characters are split into individual tokens.
        A__ : List[str] = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )

    def __A ( self ):
        # Lower casing on; default accent handling strips accents here.
        A__ : str = BasicTokenizer(do_lower_case=A__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU?  """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def __A ( self ):
        # Lower casing with strip_accents disabled keeps the accents.
        A__ : Optional[int] = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )

    def __A ( self ):
        # Lower casing with strip_accents enabled drops the accents.
        A__ : Dict = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def __A ( self ):
        # Lower casing default behaviour.
        A__ : Tuple = BasicTokenizer(do_lower_case=A__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def __A ( self ):
        # Casing preserved when do_lower_case is off.
        A__ : Dict = BasicTokenizer(do_lower_case=A__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU?  """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def __A ( self ):
        # No lower casing, accents kept.
        A__ : Tuple = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def __A ( self ):
        # No lower casing, accents stripped.
        A__ : Optional[Any] = BasicTokenizer(do_lower_case=A__ , strip_accents=A__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how  \n Are yoU?  """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def __A ( self ):
        # Tokens listed in never_split must be kept intact.
        A__ : Union[str, Any] = BasicTokenizer(do_lower_case=A__ , never_split=["""[UNK]"""] )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how  \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )

    def __A ( self ):
        # WordpieceTokenizer greedy longest-match-first behaviour, with the
        # unknown token for out-of-vocabulary pieces.
        A__ : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        A__ : Tuple = {}
        for i, token in enumerate(A__ ):
            A__ : int = i
        A__ : Dict = WordpieceTokenizer(vocab=A__ , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )

    def __A ( self ):
        # Character-class helper: whitespace detection (incl. NBSP).
        self.assertTrue(_is_whitespace(""" """ ) )
        self.assertTrue(_is_whitespace("""\t""" ) )
        self.assertTrue(_is_whitespace("""\r""" ) )
        self.assertTrue(_is_whitespace("""\n""" ) )
        self.assertTrue(_is_whitespace("""\u00A0""" ) )
        self.assertFalse(_is_whitespace("""A""" ) )
        self.assertFalse(_is_whitespace("""-""" ) )

    def __A ( self ):
        # Character-class helper: control-character detection.
        self.assertTrue(_is_control("""\u0005""" ) )
        self.assertFalse(_is_control("""A""" ) )
        self.assertFalse(_is_control(""" """ ) )
        self.assertFalse(_is_control("""\t""" ) )
        self.assertFalse(_is_control("""\r""" ) )

    def __A ( self ):
        # Character-class helper: punctuation detection.
        self.assertTrue(_is_punctuation("""-""" ) )
        self.assertTrue(_is_punctuation("""$""" ) )
        self.assertTrue(_is_punctuation("""`""" ) )
        self.assertTrue(_is_punctuation(""".""" ) )
        self.assertFalse(_is_punctuation("""A""" ) )
        self.assertFalse(_is_punctuation(""" """ ) )

    def __A ( self ):
        A__ : int = self.get_tokenizer()
        A__ : int = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(A__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )

    @slow
    def __A ( self ):
        # build_inputs_with_special_tokens wraps with [CLS] (101) / [SEP] (102).
        A__ : int = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
        A__ : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ )
        A__ : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ )
        A__ : List[Any] = tokenizer.build_inputs_with_special_tokens(A__ )
        A__ : Dict = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def __A ( self ):
        # Offset mappings from the fast tokenizer must line up with the
        # produced tokens, both cased and lower-cased.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                A__ : int = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
                A__ : int = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                A__ : str = tokenizer_r.encode_plus(
                    A__ , return_attention_mask=A__ , return_token_type_ids=A__ , return_offsets_mapping=A__ , add_special_tokens=A__ , )
                A__ : Optional[int] = tokenizer_r.do_lower_case if hasattr(A__ , """do_lower_case""" ) else False
                A__ : Optional[Any] = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """A"""),
                        ((1, 2), ""","""),
                        ((3, 5), """na"""),
                        ((5, 6), """##ï"""),
                        ((6, 8), """##ve"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """Allen"""),
                        ((21, 23), """##NL"""),
                        ((23, 24), """##P"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """a"""),
                        ((1, 2), ""","""),
                        ((3, 8), """naive"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """allen"""),
                        ((21, 23), """##nl"""),
                        ((23, 24), """##p"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )

    def __A ( self ):
        # Chinese characters: with tokenize_chinese_chars enabled no "##"
        # continuation prefix is produced; with it disabled only the first
        # character escapes the prefix.
        A__ : Any = ["""的""", """人""", """有"""]
        A__ : Union[str, Any] = """""".join(A__ )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                A__ : Tuple = True
                A__ : List[Any] = self.tokenizer_class.from_pretrained(A__ , **A__ )
                A__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
                A__ : Dict = tokenizer_p.encode(A__ , add_special_tokens=A__ )
                A__ : str = tokenizer_r.encode(A__ , add_special_tokens=A__ )
                A__ : int = tokenizer_r.convert_ids_to_tokens(A__ )
                A__ : int = tokenizer_p.convert_ids_to_tokens(A__ )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(A__ , A__ )
                self.assertListEqual(A__ , A__ )
                A__ : str = False
                A__ : List[Any] = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
                A__ : Optional[Any] = self.tokenizer_class.from_pretrained(A__ , **A__ )
                A__ : Union[str, Any] = tokenizer_r.encode(A__ , add_special_tokens=A__ )
                A__ : Union[str, Any] = tokenizer_p.encode(A__ , add_special_tokens=A__ )
                A__ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(A__ )
                A__ : Any = tokenizer_p.convert_ids_to_tokens(A__ )
                # it is expected that only the first Chinese character is not preceded by "##".
                A__ : Optional[int] = [
                    F"""##{token}""" if idx != 0 else token for idx, token in enumerate(A__ )
                ]
                self.assertListEqual(A__ , A__ )
                self.assertListEqual(A__ , A__ )
| 64 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Download URLs for the original OpenAI Whisper checkpoints, keyed by model
# size.  The conversion entry point below indexes this table as
# `_MODELS[...]`, so it must be bound under that name (the mangled `A_`-only
# binding left `_MODELS` undefined).  The second-to-last URL path component
# is the file's SHA-256 checksum, which `_download` verifies.
_MODELS = {
    'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
    'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
    'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
    'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
    'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
    'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
    'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
    'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
    'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
    'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
A_ = _MODELS  # keep the old module-level alias for backward compatibility
def UpperCamelCase (lowercase_: dict ) -> None:
    """Drop the top-level ``layers``/``blocks`` entries from the state dict
    *lowercase_* in place.  Missing keys are ignored (``pop`` with a default),
    so this is safe on any state dict."""
    ignore_keys = ["""layers""", """blocks"""]
    for k in ignore_keys:
        lowercase_.pop(k , None )
# Substring mapping from original OpenAI Whisper parameter names to the
# HuggingFace `WhisperForConditionalGeneration` names.  The key-renaming
# helper below reads this table as `WHISPER_MAPPING`, so it must be bound
# under that name (the mangled `A_`-only binding left it undefined).
WHISPER_MAPPING = {
    'blocks': 'layers',
    'mlp.0': 'fc1',
    'mlp.2': 'fc2',
    'mlp_ln': 'final_layer_norm',
    '.attn.query': '.self_attn.q_proj',
    '.attn.key': '.self_attn.k_proj',
    '.attn.value': '.self_attn.v_proj',
    '.attn_ln': '.self_attn_layer_norm',
    '.attn.out': '.self_attn.out_proj',
    '.cross_attn.query': '.encoder_attn.q_proj',
    '.cross_attn.key': '.encoder_attn.k_proj',
    '.cross_attn.value': '.encoder_attn.v_proj',
    '.cross_attn_ln': '.encoder_attn_layer_norm',
    '.cross_attn.out': '.encoder_attn.out_proj',
    'decoder.ln.': 'decoder.layer_norm.',
    'encoder.ln.': 'encoder.layer_norm.',
    'token_embedding': 'embed_tokens',
    'encoder.positional_embedding': 'encoder.embed_positions.weight',
    'decoder.positional_embedding': 'decoder.embed_positions.weight',
    'ln_post': 'layer_norm',
}
A_ = WHISPER_MAPPING  # keep the old module-level alias for backward compatibility
def UpperCamelCase (lowercase_: dict ) -> dict:
    """Rename every key of the state dict *lowercase_* in place according to
    ``WHISPER_MAPPING`` (substring replacement, applied for every matching
    mapping entry), logging each rename.  Returns the same dict."""
    keys = list(lowercase_.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"""{key} -> {new_key}""" )
        # Re-insert the tensor under the renamed key.
        lowercase_[new_key] = lowercase_.pop(key )
    return lowercase_
def UpperCamelCase (lowercase_ ) -> nn.Linear:
    """Build a bias-free ``nn.Linear`` whose weight tensor is the embedding
    matrix of *lowercase_* (an ``nn.Embedding``), shared, not copied.

    The constructor shape is immediately overridden by the direct ``.data``
    assignment, which installs the ``(vocab_size, emb_size)`` matrix.
    """
    vocab_size, emb_size = lowercase_.weight.shape
    # Original passed the embedding itself as `bias`; `False` is intended.
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = lowercase_.weight.data
    return lin_layer
def UpperCamelCase (url: str , root: str ) -> bytes:
    """Download the checkpoint at *url* into directory *root*, verifying its
    SHA-256 checksum, and return the raw bytes.

    The second-to-last URL path component is the expected checksum.  A valid
    cached file is reused; a corrupt one triggers a re-download.  The original
    signature declared two parameters both named ``lowercase_`` (SyntaxError)
    and called the nonexistent ``hashlib.shaaaa``; both fixed.

    Raises:
        RuntimeError: if the target path is a non-file, or the checksum of
            the downloaded file does not match.
    """
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("""/""" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , """rb""" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , """wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=True , unit_divisor=1024 ) as loop:
            # Stream in 8 KiB chunks so large checkpoints are never fully
            # buffered in memory.
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , """rb""" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
    return model_bytes
def UpperCamelCase (checkpoint_path , pytorch_dump_folder_path ) -> None:
    """Convert an original OpenAI Whisper checkpoint (a size key from
    ``_MODELS`` or a local ``.pt`` path) into a HuggingFace
    ``WhisperForConditionalGeneration`` and save it.

    The original signature declared two parameters both named ``lowercase_``
    (a SyntaxError); they are restored to descriptive names.
    """
    if ".pt" not in checkpoint_path:
        # NOTE(review): `_download` takes (url, root) but only the URL is
        # passed here, and its bytes result is indexed as a dict below —
        # confirm the intended loading path against the original script.
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    dimensions = original_checkpoint["""dims"""]
    state_dict = original_checkpoint["""model_state_dict"""]
    # Keep the decoder embedding weights around for the untied-embedding case.
    proj_out_weights = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    # Both MLPs share one hidden size; read it off the first decoder layer.
    ffn_dim = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    # NOTE(review): `decoder_attention_heads` is read from "n_text_state";
    # this looks like it should be "n_text_head" — confirm against the
    # original dims schema before changing.
    config = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
    model = WhisperForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        # NOTE(review): assignment targets were mangled away; `proj_out` is
        # assumed from the model architecture — confirm.
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point for the Whisper checkpoint conversion.  The original
    # bound the parser and parsed args to `A_` and then read the unbound
    # names `parser`/`args`; fixed.
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    # NOTE(review): the conversion function above is defined under the
    # mangled name `UpperCamelCase`; confirm `convert_openai_whisper_to_tfms`
    # resolves at runtime.
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 64 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _a (__magic_name__ , unittest.TestCase ):
    """Test suite for the XLM (BPE-based) slow tokenizer.

    NOTE(review): local names in this file appear machine-mangled — results
    are bound to ``A__`` but later read under their original names (e.g.
    ``self.vocab_file``) — confirm statements against the upstream test.
    """

    # Tokenizer class under test; fast-tokenizer testing disabled.
    UpperCAmelCase__: Optional[int] = XLMTokenizer
    UpperCAmelCase__: List[Any] = False

    def __A ( self ):
        # Write a tiny BPE vocab + merges file into the temp dir so the
        # tokenizer can be built offline.
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        A__ : str = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """w</w>""",
            """r</w>""",
            """t</w>""",
            """lo""",
            """low""",
            """er</w>""",
            """low</w>""",
            """lowest</w>""",
            """newer</w>""",
            """wider</w>""",
            """<unk>""",
        ]
        A__ : Tuple = dict(zip(A__ , range(len(A__ ) ) ) )
        A__ : str = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
        A__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        A__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(A__ ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(A__ ) )

    def __A ( self , A__ ):
        # Input text and the expected detokenized output.
        A__ : Tuple = """lower newer"""
        A__ : Optional[int] = """lower newer"""
        return input_text, output_text

    def __A ( self ):
        # BPE tokenization and token→id conversion against the toy vocab.
        A__ : int = XLMTokenizer(self.vocab_file , self.merges_file )
        A__ : Tuple = """lower"""
        A__ : int = ["""low""", """er</w>"""]
        A__ : Optional[int] = tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        A__ : List[str] = tokens + ["""<unk>"""]
        A__ : Dict = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )

    @slow
    def __A ( self ):
        # build_inputs_with_special_tokens wraps with <s> (0) / </s> (1).
        A__ : Union[str, Any] = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" )
        A__ : Tuple = tokenizer.encode("""sequence builders""" , add_special_tokens=A__ )
        A__ : Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A__ )
        A__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A__ )
        A__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(A__ , A__ )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 64 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
    """Fast (tiny-model) test suite for ``TextToVideoSDPipeline``.

    NOTE(review): local names in this file appear machine-mangled — values
    are bound to ``A__`` but later read under their original names (e.g.
    ``sd_pipe``) — confirm statements against the upstream test.
    """

    # Pipeline class and parameter sets exercised by the shared mixin.
    UpperCAmelCase__: Any = TextToVideoSDPipeline
    UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
    UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    UpperCAmelCase__: Optional[int] = frozenset(
        [
            '''num_inference_steps''',
            '''generator''',
            '''latents''',
            '''return_dict''',
            '''callback''',
            '''callback_steps''',
        ] )

    def __A ( self ):
        # Build a deterministic set of tiny pipeline components (3D UNet,
        # DDIM scheduler, VAE, CLIP text encoder + tokenizer).
        torch.manual_seed(0 )
        A__ : Optional[int] = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
        A__ : Optional[int] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        A__ : Union[str, Any] = CLIPTextModel(A__ )
        A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        A__ : Dict = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components

    def __A ( self , A__ , A__=0 ):
        # Seeded generator + standard call kwargs for reproducible runs.
        if str(A__ ).startswith("""mps""" ):
            A__ : Tuple = torch.manual_seed(A__ )
        else:
            A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
        A__ : List[str] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
        return inputs

    def __A ( self ):
        # Full forward pass on CPU; check output shape and a pixel slice.
        A__ : List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        A__ : Union[str, Any] = self.get_dummy_components()
        A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
        A__ : int = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        A__ : int = self.get_dummy_inputs(A__ )
        A__ : int = """np"""
        A__ : Any = sd_pipe(**A__ ).frames
        A__ : Dict = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __A ( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __A ( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )

    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        pass

    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        pass

    @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def __A ( self ):
        pass

    def __A ( self ):
        # Delegate to the mixin's default progress-bar test.
        return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
    """Slow (real-checkpoint, CUDA) integration tests for
    ``TextToVideoSDPipeline``, compared against stored reference videos.

    NOTE(review): local names appear machine-mangled (values bound to ``A__``
    but read as ``pipe``/``expected_video``/``video``) — confirm against the
    upstream test.
    """

    def __A ( self ):
        # 25-step generation with the DPMSolver scheduler vs. reference clip.
        A__ : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
        A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        A__ : int = pipe.to("""cuda""" )
        A__ : Optional[Any] = """Spiderman is surfing"""
        A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
        A__ : Dict = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2

    def __A ( self ):
        # 2-step generation with the default scheduler vs. reference clip.
        A__ : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
        A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : List[str] = pipe.to("""cuda""" )
        A__ : Dict = """Spiderman is surfing"""
        A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
        A__ : Optional[int] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 64 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _a (__magic_name__ ):
    """Dataset reader that loads a :class:`Dataset` from JSON/JSON-Lines
    files via the packaged ``Json`` builder.

    NOTE(review): local names in this file appear machine-mangled (values
    bound to ``A__`` but read under their original names) — confirm against
    the upstream implementation.
    """

    def __init__( self , A__ , A__ = None , A__ = None , A__ = None , A__ = False , A__ = False , A__ = None , A__ = None , **A__ , ):
        # Forward the common reader options, then configure the Json builder
        # for the given files/split/field.
        super().__init__(
            A__ , split=A__ , features=A__ , cache_dir=A__ , keep_in_memory=A__ , streaming=A__ , num_proc=A__ , **A__ , )
        A__ : Tuple = field
        A__ : Optional[int] = path_or_paths if isinstance(A__ , A__ ) else {self.split: path_or_paths}
        A__ : List[Any] = Json(
            cache_dir=A__ , data_files=A__ , features=A__ , field=A__ , **A__ , )

    def __A ( self ):
        # Build iterable dataset
        if self.streaming:
            A__ : Dict = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            A__ : str = None
            A__ : str = None
            A__ : Union[str, Any] = None
            A__ : Any = None
            self.builder.download_and_prepare(
                download_config=A__ , download_mode=A__ , verification_mode=A__ , base_path=A__ , num_proc=self.num_proc , )
            A__ : Union[str, Any] = self.builder.as_dataset(
                split=self.split , verification_mode=A__ , in_memory=self.keep_in_memory )
        return dataset
class _a :
    """Dataset-to-JSON writer: serializes a :class:`Dataset` to a path or
    file-like object in batches, optionally in parallel.

    NOTE(review): local names appear machine-mangled (values bound to
    ``A__`` but read as ``orient``/``compression``/...) — confirm against
    the upstream implementation.
    """

    def __init__( self , A__ , A__ , A__ = None , A__ = None , **A__ , ):
        # Validate parallelism and stash the serialization options.
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
        A__ : int = dataset
        A__ : Any = path_or_buf
        A__ : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        A__ : str = num_proc
        A__ : Union[str, Any] = """utf-8"""
        A__ : List[Any] = to_json_kwargs

    def __A ( self ):
        # Resolve pandas `to_json` defaults (records/lines orientation) and
        # dispatch to a filesystem path or an open buffer.
        A__ : List[Any] = self.to_json_kwargs.pop("""path_or_buf""" , A__ )
        A__ : List[Any] = self.to_json_kwargs.pop("""orient""" , """records""" )
        A__ : Optional[Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
        A__ : Optional[int] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
        A__ : int = self.to_json_kwargs.pop("""compression""" , A__ )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , """wb""" , compression=A__ ) as buffer:
                A__ : Optional[Any] = self._write(file_obj=A__ , orient=A__ , lines=A__ , index=A__ , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
                    """ was passed. Please provide a local path instead.""" )
            A__ : int = self._write(
                file_obj=self.path_or_buf , orient=A__ , lines=A__ , index=A__ , **self.to_json_kwargs )
        return written

    def __A ( self , A__ ):
        # Serialize one batch (slice of rows) to a JSON byte string.
        A__ , A__ , A__ , A__ , A__ : int = args
        A__ : Union[str, Any] = query_table(
            table=self.dataset.data , key=slice(A__ , offset + self.batch_size ) , indices=self.dataset._indices , )
        A__ : List[Any] = batch.to_pandas().to_json(
            path_or_buf=A__ , orient=A__ , lines=A__ , index=A__ , **A__ )
        if not json_str.endswith("""\n""" ):
            json_str += "\n"
        return json_str.encode(self.encoding )

    def __A ( self , A__ , A__ , A__ , A__ , **A__ , ):
        # Write all batches, sequentially or via a multiprocessing pool,
        # returning the number of bytes written.
        A__ : List[str] = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                A__ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(A__ )
        else:
            A__ , A__ : Optional[int] = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , A__ , A__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                    written += file_obj.write(A__ )
        return written
| 64 |
def UpperCamelCase (lowercase_: int ) -> int:
    """Return the 1-indexed position of the highest set bit of *lowercase_*
    (i.e. its bit length); 0 for an input of 0.

    Raises:
        TypeError: if *lowercase_* is not an ``int``.
        ValueError: if *lowercase_* is negative (the original right-shift
            loop would never terminate on negative values).
    """
    if not isinstance(lowercase_ , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    if lowercase_ < 0:
        raise ValueError("""Input value must be non-negative""" )
    position = 0
    number = lowercase_
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 64 | 1 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _a :
    """Builds a tiny ViTHybrid configuration plus matching dummy inputs/labels
    for the unit tests below.

    NOTE(review): every parameter name was mangled to ``A__`` and every method
    to ``__A``, while the bodies still read the original names (``parent``,
    ``batch_size``, ...); restore the real names before running. ``0.0_2`` is
    0.02 (initializer_range).
    """

    def __init__( self , A__ , A__=13 , A__=64 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=5 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=[1, 16, 4, 4] , A__=None , ):
        A__ : int = parent
        A__ : str = batch_size
        A__ : Tuple = image_size
        A__ : List[Any] = patch_size
        A__ : Optional[Any] = num_channels
        A__ : List[str] = is_training
        A__ : List[Any] = use_labels
        A__ : Optional[Any] = hidden_size
        A__ : Any = num_hidden_layers
        A__ : List[Any] = num_attention_heads
        A__ : Any = intermediate_size
        A__ : Optional[int] = hidden_act
        A__ : List[str] = hidden_dropout_prob
        A__ : str = attention_probs_dropout_prob
        A__ : int = type_sequence_label_size
        A__ : Dict = initializer_range
        A__ : Tuple = scope
        A__ : Dict = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        A__ : Optional[int] = (self.image_size // 32) ** 2
        A__ : List[str] = num_patches + 1

    def __A ( self ):
        # Random pixel values (and labels when ``use_labels``) plus a config.
        A__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A__ : str = None
        if self.use_labels:
            A__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        A__ : Dict = self.get_config()
        return config, pixel_values, labels

    def __A ( self ):
        # Tiny BiT backbone settings keep the hybrid model small enough for CI.
        A__ : Union[str, Any] = {
            """global_padding""": """same""",
            """layer_type""": """bottleneck""",
            """depths""": [3, 4, 9],
            """out_features""": ["""stage1""", """stage2""", """stage3"""],
            """embedding_dynamic_padding""": True,
            """hidden_sizes""": [4, 8, 16, 32],
            """num_groups""": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A__ , )

    def __A ( self , A__ , A__ , A__ ):
        # Forward pass through the bare model; check last_hidden_state shape.
        A__ : Optional[int] = ViTHybridModel(config=A__ )
        model.to(A__ )
        model.eval()
        A__ : Tuple = model(A__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __A ( self , A__ , A__ , A__ ):
        # Forward pass through the classification head; check logits shape.
        A__ : Dict = self.type_sequence_label_size
        A__ : Dict = ViTHybridForImageClassification(A__ )
        model.to(A__ )
        model.eval()
        A__ : Any = model(A__ , labels=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def __A ( self ):
        # Repackage config/inputs into the dict format the common tests expect.
        A__ : List[str] = self.prepare_config_and_inputs()
        A__ , A__ , A__ : Union[str, Any] = config_and_inputs
        A__ : str = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
    """Common-suite tests for ViTHybrid (bare model + image-classification head).

    NOTE(review): the base classes were mangled to ``__magic_name__``
    (presumably the ModelTesterMixin / PipelineTesterMixin imported above) and
    every test method was renamed to ``__A``, so unittest will not discover
    them as written; restore the original names before running.
    """

    # Model classes / pipeline mapping consumed by the common test suite.
    UpperCAmelCase__: str = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    UpperCAmelCase__: Tuple = (
        {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags for the common suite (all disabled for this model).
    UpperCAmelCase__: Dict = False
    UpperCAmelCase__: List[Any] = False
    UpperCAmelCase__: Tuple = False

    def __A ( self ):
        # Shared fixtures for every test below.
        A__ : Union[str, Any] = ViTHybridModelTester(self )
        A__ : List[Any] = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )

    def __A ( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def __A ( self ):
        pass

    def __A ( self ):
        # Input embeddings must be a Module; output embeddings absent or Linear.
        A__ , A__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : int = model_class(A__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            A__ : int = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )

    def __A ( self ):
        # The forward signature must start with ``pixel_values``.
        A__ , A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : List[str] = model_class(A__ )
            A__ : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ : Optional[int] = [*signature.parameters.keys()]
            A__ : Union[str, Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , A__ )

    def __A ( self ):
        A__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )

    def __A ( self ):
        A__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__ )

    def __A ( self ):
        # With a zeroed initializer_range, every trainable non-backbone weight
        # must come out as exactly 0.0 or 1.0.
        A__ , A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        A__ : Any = _config_zero_init(A__ )
        for model_class in self.all_model_classes:
            A__ : List[str] = model_class(config=A__ )
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    A__ : Tuple = [F"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @slow
    def __A ( self ):
        # Smoke-test loading a pretrained checkpoint from the hub.
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Union[str, Any] = ViTHybridModel.from_pretrained(A__ )
            self.assertIsNotNone(A__ )
def UpperCamelCase () -> Any:
    """Load the COCO cats fixture image used by the integration tests below."""
    # Bug fix: the original assigned the opened image to a mangled local and
    # then returned the undefined name ``image``; return it directly instead.
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class _a (unittest.TestCase ):
    """Slow integration tests running real ViTHybrid checkpoints on the COCO
    cats fixture image.

    NOTE(review): test methods were mangled to ``__A`` and locals to ``A__``
    (the bodies still read the original names); restore before running.
    """

    @cached_property
    def __A ( self ):
        # Image processor for the first pretrained checkpoint (None when the
        # vision extras are unavailable).
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def __A ( self ):
        # End-to-end classification: pin the shape and first three logits.
        A__ : str = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            A__ )
        A__ : Optional[Any] = self.default_image_processor
        A__ : Tuple = prepare_img()
        A__ : Optional[int] = image_processor(images=A__ , return_tensors="""pt""" ).to(A__ )
        # forward pass
        with torch.no_grad():
            A__ : List[Any] = model(**A__ )
        # verify the logits
        A__ : Tuple = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , A__ )
        A__ : List[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(A__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1e-4 ) )

    @slow
    @require_accelerate
    def __A ( self ):
        # Same checkpoint loaded via accelerate's device_map="auto".
        A__ : str = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
        A__ : Tuple = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
        A__ : Optional[Any] = prepare_img()
        A__ : Union[str, Any] = image_processor(images=A__ , return_tensors="""pt""" )
        A__ : Union[str, Any] = model(**A__ )
        A__ : Tuple = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        A__ : Union[str, Any] = logits.argmax(-1 ).item()
        # NOTE(review): assertTrue's second argument is only a failure message,
        # so this never compares anything — probably meant assertEqual; and
        # ``idalabel`` looks like a mangled ``id2label``. Confirm both.
        self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
| 64 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase (img: np.ndarray , pta: np.ndarray , ptb: np.ndarray , rows: int , cols: int ) -> np.ndarray:
    """Warp ``img`` with the affine transform that maps the three source
    points ``pta`` onto the three destination points ``ptb``.

    Bug fix: all five parameters had been renamed to the same identifier
    (``lowercase_``), which is a SyntaxError in Python; distinct names are
    restored here.

    Args:
        img: source image (2-D grayscale or 3-D color array).
        pta: 3x2 float array of source points.
        ptb: 3x2 float array of destination points.
        rows, cols: size of the output image.
    """
    matrix = cva.getAffineTransform(pta , ptb )
    return cva.warpAffine(img , matrix , (rows, cols) )
if __name__ == "__main__":
    # Demo: rotate the Lena test image three different ways and plot a 2x2 grid.
    # NOTE(review): every local below was mangled to the same name ``A_`` while
    # later lines still read the originals (``image``, ``gray_img``,
    # ``img_rows``/``img_cols``, ``ptsa``..., ``images``, ``titles``), and
    # ``get_rotation`` refers to the function above (now ``UpperCamelCase``);
    # ``np.floataa`` is a mangled float dtype. Restore the names before running.
    # read original image
    A_ : List[Any] = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    A_ : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    A_ , A_ : Optional[Any] = gray_img.shape
    # set different points to rotate image
    A_ : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
    A_ : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
    A_ : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
    A_ : Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
    # add all rotated images in a list
    A_ : Dict = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    A_ : Union[str, Any] = plt.figure(1)
    A_ : Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 64 | 1 |
from __future__ import annotations
class _a :
'''simple docstring'''
def __init__( self , A__ , A__ ):
A__ , A__ : Dict = text, pattern
A__ , A__ : Optional[int] = len(A__ ), len(A__ )
def __A ( self , A__ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def __A ( self , A__ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def __A ( self ):
# searches pattern in text and returns index positions
A__ : Optional[Any] = []
for i in range(self.textLen - self.patLen + 1 ):
A__ : Dict = self.mismatch_in_text(A__ )
if mismatch_index == -1:
positions.append(A__ )
else:
A__ : str = self.match_in_pattern(self.text[mismatch_index] )
A__ : Optional[Any] = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
# Demo driver for the bad-character search above.
# Bug fix: the original discarded the text/pattern/search-result values into
# the throwaway name ``A_`` and then instantiated the undefined name
# ``BoyerMooreSearch`` (the class above is bound to ``_a``).
text = 'ABAABA'
pattern = 'AB'
bms = _a(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
    print('No match found')
else:
    print('Pattern found in following positions: ')
    print(positions)
| 64 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
    """Smoke tests for PyTorchBenchmark: run tiny hub checkpoints through
    inference/training timing and memory measurement, and check CSV/log
    artifact creation.

    NOTE(review): every test method was mangled to ``__A`` (unittest will not
    discover them) and the locals still read the original names (``MODEL_ID``,
    ``benchmark``, ``results``, ``config``, ...); ``fpaa`` is a mangled
    ``fp16`` flag. Restore the names before running.
    """

    def __A ( self , A__ ):
        # Helper: assert every (batch_size, sequence_length) result cell is set.
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                A__ : str = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(A__ )

    def __A ( self ):
        # Inference benchmark on a tiny GPT-2.
        A__ : Dict = """sshleifer/tiny-gpt2"""
        A__ : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ )
        A__ : List[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # Inference with only_pretrain_model on a tiny classifier checkpoint.
        A__ : Dict = """sgugger/tiny-distilbert-classification"""
        A__ : Dict = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
        A__ : str = PyTorchBenchmark(A__ )
        A__ : List[str] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # Inference with torchscript enabled.
        A__ : Any = """sshleifer/tiny-gpt2"""
        A__ : List[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Tuple = PyTorchBenchmark(A__ )
        A__ : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
    def __A ( self ):
        # Half-precision inference (GPU only).
        A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
        A__ : Optional[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : str = PyTorchBenchmark(A__ )
        A__ : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # Inference when config.architectures is None (class must be inferred).
        A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
        A__ : Tuple = AutoConfig.from_pretrained(A__ )
        # set architectures equal to `None`
        A__ : List[Any] = None
        A__ : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # Training benchmark.
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : Optional[int] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Any = PyTorchBenchmark(A__ )
        A__ : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
    def __A ( self ):
        # Half-precision training (GPU only).
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
        A__ : Dict = PyTorchBenchmark(A__ )
        A__ : Optional[int] = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def __A ( self ):
        # Inference with an explicit config object (tiny GPT-2).
        A__ : int = """sshleifer/tiny-gpt2"""
        A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
        A__ : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # Encoder-decoder inference with an explicit config (tiny BART).
        A__ : List[str] = """sshleifer/tinier_bart"""
        A__ : List[str] = AutoConfig.from_pretrained(A__ )
        A__ : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        # Training with an explicit config (tiny GPT-2).
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
        A__ : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def __A ( self ):
        # Training with an explicit config (tiny BART).
        A__ : Dict = """sshleifer/tinier_bart"""
        A__ : int = AutoConfig.from_pretrained(A__ )
        A__ : Union[str, Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Optional[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def __A ( self ):
        # CSV artifacts: all five report files must be created in tmp_dir.
        A__ : int = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ : Dict = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
            A__ : Optional[Any] = PyTorchBenchmark(A__ )
            benchmark.run()
            self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )

    def __A ( self ):
        # Line-by-line memory tracing: summaries populated and log file written.
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""

        def _check_summary_is_not_empty(A__ ):
            self.assertTrue(hasattr(A__ , """sequential""" ) )
            self.assertTrue(hasattr(A__ , """cumulative""" ) )
            self.assertTrue(hasattr(A__ , """current""" ) )
            self.assertTrue(hasattr(A__ , """total""" ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ : Dict = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
            A__ : Dict = PyTorchBenchmark(A__ )
            A__ : str = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
| 64 | 1 |
import string
from math import logaa
def UpperCamelCase (term: str , document: str ) -> int:
    """Return how many times ``term`` occurs in ``document`` (case-insensitive).

    Punctuation is stripped and newlines removed before tokenizing on spaces.

    Bug fix: both parameters had been renamed to the same identifier
    (``lowercase_``) — a SyntaxError — and the locals the body read
    (``document_without_punctuation``, ``tokenize_document``) no longer
    existed; the original names are restored.
    """
    document_without_punctuation = document.translate(
        str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
    tokenize_document = document_without_punctuation.split(""" """ )  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def UpperCamelCase (term: str , corpus: str ) -> tuple[int, int]:
    """Return ``(docs containing term, total docs)`` for a newline-separated
    ``corpus``; matching is case-insensitive and a plain substring test.

    Bug fix: both parameters shared the mangled name ``lowercase_`` (a
    SyntaxError) and the locals the return statement read (``docs``, ``term``)
    had been overwritten/renamed away; the original names are restored.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("""""" , """""" , string.punctuation ) )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("""\n""" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def UpperCamelCase (df: int , n: int , smoothing: bool = False ) -> float:
    """Return the inverse document frequency ``log10(n / df)`` rounded to 3
    decimal places, or the smoothed variant ``1 + log10(n / (1 + df))``.

    Args:
        df: number of documents containing the term.
        n: total number of documents.
        smoothing: use the smoothed formula (tolerates df == 0).

    Raises:
        ValueError: if ``n`` is 0 (log10(0) is undefined).
        ZeroDivisionError: if ``df`` is 0 without smoothing.

    Bug fix: the three parameters shared the mangled name ``lowercase_`` (a
    SyntaxError) while the body read ``n``/``df``/``smoothing``; also the
    module-level ``from math import logaa`` names a non-existent function, so
    ``log10`` is imported locally here.
    """
    from math import log10

    if smoothing:
        if n == 0:
            raise ValueError("""log10(0) is undefined.""" )
        return round(1 + log10(n / (1 + df) ) , 3 )
    if df == 0:
        raise ZeroDivisionError("""df must be > 0""" )
    elif n == 0:
        raise ValueError("""log10(0) is undefined.""" )
    return round(log10(n / df ) , 3 )
def UpperCamelCase (tf: int , idf: int ) -> float:
    """Return the tf-idf score ``tf * idf`` rounded to 3 decimal places.

    Bug fix: both parameters shared the mangled name ``lowercase_`` (a
    SyntaxError) while the body read ``tf`` and ``idf``; restored.
    """
    return round(tf * idf , 3 )
| 64 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Bug fix: the repo path was assigned to the throwaway name ``A_`` while the
# next line reads ``git_repo_path``, which raised a NameError.
git_repo_path = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase (lowercase_: List[str] ) -> Any:
    """Register the custom pytest markers used across the test suite on the
    given pytest ``config`` object."""
    _marker_descriptions = (
        """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""",
        """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""",
        """is_pipeline_test: mark test to run only when pipelines are tested""",
        """is_staging_test: mark test to run only in the staging environment""",
        """accelerate_tests: mark test that require accelerate""",
        """tool_tests: mark the tool tests that are run on their specific schedule""",
    )
    # Same calls in the same order as the original unrolled version.
    for description in _marker_descriptions:
        lowercase_.addinivalue_line("""markers""" , description )
def UpperCamelCase (lowercase_: Optional[int] ) -> Optional[Any]:
    """Forward pytest option registration to the shared transformers helper."""
    # Imported lazily, exactly as in the original, so transformers is only
    # required when the hook actually fires.
    from transformers.testing_utils import pytest_addoption_shared as _shared

    _shared(lowercase_ )
def UpperCamelCase (terminalreporter ):
    """pytest terminal-summary hook body: when ``--make-reports`` was passed,
    emit the transformers report files via the shared helper.

    Bug fix: the parameter was mangled to ``lowercase_`` while the body read
    ``terminalreporter`` (NameError), and both arguments of the helper call
    were collapsed to ``lowercase_`` — the ``id`` should be the report id
    returned by ``--make-reports``.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def UpperCamelCase (session , exitstatus ):
    """pytest sessionfinish hook body: map exit code 5 ("no tests collected",
    which would fail the CI) to a successful 0.

    Bug fix: both parameters shared the mangled name ``lowercase_`` (a
    SyntaxError) and the original assigned 0 to a throwaway local instead of
    ``session.exitstatus``, so the exit code was never changed.
    """
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
# Bug fix: both values were assigned to the throwaway name ``A_`` while the
# checker class below reads ``IGNORE_RESULT`` and ``OutputChecker``.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker (doctest.OutputChecker ):
    """doctest OutputChecker that accepts any output when the custom
    IGNORE_RESULT option flag is set, and otherwise defers to the stock
    checker.

    Bug fixes: the class had been renamed ``_a`` although the line below reads
    ``CustomOutputChecker``; its base was the undefined ``__magic_name__``;
    and the hook method was named ``__A`` so doctest never called it — the
    required name is ``check_output``.
    """

    def check_output( self , want , got , optionflags ):
        # register_optionflag is idempotent (setdefault on the flag registry),
        # so this retrieves the IGNORE_RESULT bit without relying on module
        # globals.
        if doctest.register_optionflag("IGNORE_RESULT" ) & optionflags:
            return True
        return doctest.OutputChecker.check_output(self , want , got , optionflags )
# NOTE(review): these three assignments all rebind the throwaway name ``A_``
# and therefore have no effect. They look like mangled monkeypatches wiring
# the custom checker and the Hf doctest classes into doctest/pytest (e.g.
# replacing doctest's OutputChecker with CustomOutputChecker) — the original
# assignment targets were destroyed and must be restored before these do
# anything.
A_ : str = CustomOutputChecker
A_ : Dict = HfDoctestModule
A_ : Optional[int] = HfDocTestParser
| 64 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase () -> Any:
    """CLI entry point: parse ``TensorFlowBenchmarkArguments`` from argv and
    run the TensorFlow benchmark.

    On a ValueError from parsing, rebuilds a friendlier message pointing
    deprecated ``--no_*`` flags at their ``--no-*`` replacements.

    NOTE(review): the locals were mangled to ``A__`` while later lines still
    read the original names (``parser``, ``benchmark``, ``arg_error_msg``,
    ``begin_error_msg``, ``full_error_msg``, ``depreciated_args``,
    ``wrong_args``), and the ``if __name__`` guard calls ``main`` although the
    function is bound to ``UpperCamelCase`` — restore before running. Also,
    the ``eval`` on text derived from the exception message is unsafe; it
    should be replaced with explicit parsing.
    """
    A__ : Any = HfArgumentParser(lowercase_ )
    A__ : List[Any] = parser.parse_args_into_dataclasses()[0]
    A__ : str = TensorFlowBenchmark(args=lowercase_ )
    try:
        A__ : str = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Build one combined error message covering every deprecated flag used.
        A__ : List[Any] = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        A__ : Tuple = """ """.join(str(lowercase_ ).split(""" """ )[:-1] )
        A__ : str = """"""
        A__ : Tuple = eval(str(lowercase_ ).split(""" """ )[-1] )
        A__ : int = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(lowercase_ )
        if len(lowercase_ ) > 0:
            A__ : str = full_error_msg + begin_error_msg + str(lowercase_ )
        raise ValueError(lowercase_ )
    benchmark.run()


if __name__ == "__main__":
    main()
| 64 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
    """Builds a tiny Pegasus config plus matching dummy inputs for the TF
    tests below, and checks cached vs. uncached decoding.

    NOTE(review): parameter names were mangled to ``A__`` and methods to
    ``__A`` while the bodies still read the original names; ``tf.inta`` is a
    mangled TF integer dtype. Restore before running.
    """

    # Hooks consumed by the shared TF seq2seq test helpers.
    UpperCAmelCase__: List[Any] = PegasusConfig
    UpperCAmelCase__: Optional[int] = {}
    UpperCAmelCase__: List[str] = '''gelu'''

    def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
        A__ : Dict = parent
        A__ : Dict = batch_size
        A__ : Any = seq_length
        A__ : Optional[Any] = is_training
        A__ : int = use_labels
        A__ : Any = vocab_size
        A__ : Union[str, Any] = hidden_size
        A__ : Tuple = num_hidden_layers
        A__ : Tuple = num_attention_heads
        A__ : List[Any] = intermediate_size
        A__ : Union[str, Any] = hidden_dropout_prob
        A__ : Optional[Any] = attention_probs_dropout_prob
        A__ : List[Any] = max_position_embeddings
        A__ : Any = eos_token_id
        A__ : List[Any] = pad_token_id
        A__ : List[Any] = bos_token_id

    def __A ( self ):
        # Random input ids forced to end in EOS, decoder ids, config, and the
        # assembled keyword-input dict.
        A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
        A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : Tuple = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
        return config, inputs_dict

    def __A ( self , A__ , A__ ):
        # Verify that decoding with past_key_values matches uncached decoding
        # on a random slice of the logits.
        A__ : int = TFPegasusModel(config=A__ ).get_decoder()
        A__ : List[Any] = inputs_dict["""input_ids"""]
        A__ : Any = input_ids[:1, :]
        A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
        A__ : Optional[int] = inputs_dict["""head_mask"""]
        A__ : Any = 1
        # first forward pass
        A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
        A__ , A__ : Dict = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
        A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
        A__ : Tuple = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def prepare_pegasus_inputs_dict (config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Assemble the keyword inputs for a TFPegasus forward pass, deriving any
    mask that was not supplied.

    Bug fixes: the function had been renamed ``UpperCamelCase`` although the
    model tester above calls ``prepare_pegasus_inputs_dict``; every parameter
    shared the mangled name ``lowercase_`` (a SyntaxError); and ``tf.inta`` is
    not a TensorFlow dtype (restored to ``tf.int8`` — confirm against the
    upstream test).
    """
    if attention_mask is None:
        # Attend everywhere except padding tokens.
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # Always attend to the first (decoder start) token; mask padding after it.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
    """Common-suite tests for TFPegasus (bare model + conditional generation).

    NOTE(review): the base classes were mangled to ``__magic_name__``
    (presumably TFModelTesterMixin / PipelineTesterMixin imported above) and
    the test methods to ``__A``, so unittest will not discover them as
    written.
    """

    # Model classes / pipeline mapping consumed by the common test suite.
    UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    # Feature flags for the common suite.
    UpperCAmelCase__: int = True
    UpperCAmelCase__: Union[str, Any] = False
    UpperCAmelCase__: List[str] = False

    def __A ( self ):
        # Shared fixtures for the tests below.
        A__ : Optional[Any] = TFPegasusModelTester(self )
        A__ : Tuple = ConfigTester(self , config_class=A__ )

    def __A ( self ):
        self.config_tester.run_common_tests()

    def __A ( self ):
        # Exercise the cached-decoding equivalence check from the model tester.
        A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCAmelCase__: Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''
@cached_property
def __A ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ):
A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
    def __A ( self , **A__ ):
        """Generate for the source texts and assert the output matches the references."""
        A__ : str = self.translate_src_text(**A__ )
        # NOTE(review): `generated_words` is expected to be the value assigned
        # above under an obfuscated name — confirm before running.
        assert self.expected_text == generated_words
    def __A ( self , **A__ ):
        """Tokenize ``self.src_text``, run beam-search generation, and decode.

        NOTE(review): `model_inputs` / `generated_ids` / `generated_words` below
        are expected to be the values assigned under obfuscated names — confirm.
        """
        A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
        A__ : Optional[int] = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
        A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
        return generated_words
    @slow
    def __A ( self ):
        """Slow integration test: full batch generation must match the reference texts."""
        self._assert_generated_batch_equal_expected()
| 64 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
A_ : Any = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class _a (unittest.TestCase ):
    """Pipeline tests for the zero-shot-classification task (PT and TF backends).

    NOTE(review): identifiers in this block were machine-obfuscated. Several
    names (`model_mapping`, `classifier`, `outputs`, `zero_shot_classifier`,
    `original_labelaid`, `config`) are read without a visible binding under
    that name — reconcile with the upstream transformers test before running.
    """

    UpperCAmelCase__: Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    UpperCAmelCase__: List[str] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    # Drop model types (see _TO_SKIP) whose inputs differ from plain text models.
    if model_mapping is not None:
        UpperCAmelCase__: List[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        UpperCAmelCase__: Dict = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def __A ( self , A__ , A__ , A__ ):
        """Build a pipeline instance plus example inputs for the common test runner."""
        A__ : List[Any] = ZeroShotClassificationPipeline(
            model=A__ , tokenizer=A__ , candidate_labels=["""polics""", """health"""] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def __A ( self , A__ , A__ ):
        """Exercise the pipeline's input/label combinations and its error paths."""
        A__ : Union[str, Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
        self.assertEqual(A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ )], """scores""": [ANY(A__ )]} )
        # No kwarg
        A__ : Tuple = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
        self.assertEqual(A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ )], """scores""": [ANY(A__ )]} )
        A__ : Optional[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
        self.assertEqual(A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ )], """scores""": [ANY(A__ )]} )
        A__ : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
        self.assertEqual(
            A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ ), ANY(A__ )], """scores""": [ANY(A__ ), ANY(A__ )]} )
        # Scores over the candidate labels must form a probability distribution.
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
        A__ : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
        self.assertEqual(
            A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ ), ANY(A__ )], """scores""": [ANY(A__ ), ANY(A__ )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
        A__ : List[Any] = classifier(
            """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
        self.assertEqual(A__ , {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ )], """scores""": [ANY(A__ )]} )
        # https://github.com/huggingface/transformers/issues/13846
        A__ : Tuple = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            A__ , [
                {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ ), ANY(A__ )], """scores""": [ANY(A__ ), ANY(A__ )]}
                for i in range(1 )
            ] , )
        A__ : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
        self.assertEqual(
            A__ , [
                {"""sequence""": ANY(A__ ), """labels""": [ANY(A__ ), ANY(A__ )], """scores""": [ANY(A__ ), ANY(A__ )]}
                for i in range(2 )
            ] , )
        # Invalid inputs must raise: empty sequence, non-string sequence,
        # empty/None labels, and hypothesis templates without a {} placeholder.
        with self.assertRaises(A__ ):
            classifier("""""" , candidate_labels="""politics""" )
        with self.assertRaises(A__ ):
            classifier(A__ , candidate_labels="""politics""" )
        with self.assertRaises(A__ ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
        with self.assertRaises(A__ ):
            classifier("""Who are you voting for in 2020?""" , candidate_labels=A__ )
        with self.assertRaises(A__ ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
        with self.assertRaises(A__ ):
            classifier(
                """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A__ , )
        self.run_entailment_id(A__ )

    def __A ( self , A__ ):
        """Check entailment-label resolution for several label2id layouts."""
        A__ : str = zero_shot_classifier.model.config
        A__ : Any = config.labelaid
        A__ : int = zero_shot_classifier.entailment_id
        # No label containing "entail" -> sentinel -1.
        A__ : List[str] = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        A__ : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        A__ : int = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        A__ : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        # Restore the original mapping so later tests see an unmodified config.
        A__ : Union[str, Any] = original_labelaid
        self.assertEqual(A__ , zero_shot_classifier.entailment_id )

    @require_torch
    def __A ( self ):
        """Regression test for very long inputs (truncation), see issue below."""
        A__ : Optional[int] = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )

    @require_torch
    def __A ( self ):
        """Small-model smoke test on the PyTorch backend."""
        A__ : Dict = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
        A__ : List[str] = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    @require_tf
    def __A ( self ):
        """Small-model smoke test on the TensorFlow backend."""
        A__ : Any = pipeline(
            """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
        A__ : Any = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    @slow
    @require_torch
    def __A ( self ):
        """Full-size model test (roberta-large-mnli) on PyTorch, incl. multi_label."""
        A__ : List[str] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
        A__ : Dict = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""politics""", """public health""", """science"""],
                """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )
        A__ : Tuple = zero_shot_classifier(
            """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
            """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
            """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
            """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
            """ machine translation tasks show these models to be superior in quality while being more parallelizable"""
            """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
            """ English-to-German translation task, improving over the existing best results, including ensembles by"""
            """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
            """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
            """ fraction of the training costs of the best models from the literature. We show that the Transformer"""
            """ generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A__ , )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": (
                    """The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
                    """ networks in an encoder-decoder configuration. The best performing models also connect the"""
                    """ encoder and decoder through an attention mechanism. We propose a new simple network"""
                    """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
                    """ and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
                    """ superior in quality while being more parallelizable and requiring significantly less time to"""
                    """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
                    """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
                    """ English-to-French translation task, our model establishes a new single-model state-of-the-art"""
                    """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
                    """ costs of the best models from the literature. We show that the Transformer generalizes well to"""
                    """ other tasks by applying it successfully to English constituency parsing both with large and"""
                    """ limited training data."""
                ),
                """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
                """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )

    @slow
    @require_tf
    def __A ( self ):
        """Full-size model test (roberta-large-mnli) on TensorFlow, incl. multi_label."""
        A__ : Optional[Any] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
        A__ : Tuple = zero_shot_classifier(
            """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""politics""", """public health""", """science"""],
                """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )
        A__ : Any = zero_shot_classifier(
            """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
            """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
            """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
            """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
            """ machine translation tasks show these models to be superior in quality while being more parallelizable"""
            """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
            """ English-to-German translation task, improving over the existing best results, including ensembles by"""
            """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
            """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
            """ fraction of the training costs of the best models from the literature. We show that the Transformer"""
            """ generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A__ , )
        self.assertEqual(
            nested_simplify(A__ ) , {
                """sequence""": (
                    """The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
                    """ networks in an encoder-decoder configuration. The best performing models also connect the"""
                    """ encoder and decoder through an attention mechanism. We propose a new simple network"""
                    """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
                    """ and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
                    """ superior in quality while being more parallelizable and requiring significantly less time to"""
                    """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
                    """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
                    """ English-to-French translation task, our model establishes a new single-model state-of-the-art"""
                    """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
                    """ costs of the best models from the literature. We show that the Transformer generalizes well to"""
                    """ other tasks by applying it successfully to English constituency parsing both with large and"""
                    """ limited training data."""
                ),
                """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
                """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )
| 64 |
class EditDistance:
    """Compute the Levenshtein (edit) distance between two strings.

    Offers a memoized top-down recursion (`min_dist_top_down`) and an
    iterative bottom-up table (`min_dist_bottom_up`); both return the minimum
    number of single-character insertions, deletions, and substitutions.

    Fixes vs. the previous revision: the three methods all collided on the
    name ``__A`` and both word attributes collided on ``worda``; names are
    restored to match the call sites (`__min_dist_top_down_dp`,
    `min_dist_top_down`, `min_dist_bottom_up`) and the ``EditDistance()``
    constructor used by the ``__main__`` block.
    """

    def __init__(self):
        self.word1 = ""  # first string, set by the public entry points
        self.word2 = ""  # second string
        self.dp = []     # memo/DP table

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        """Memoized distance between word1[:m+1] and word2[:n+1] (indices inclusive)."""
        if m == -1:
            # word1 exhausted: insert the remaining n+1 chars of word2.
            return n + 1
        elif n == -1:
            # word2 exhausted: delete the remaining m+1 chars of word1.
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        """Return the edit distance between ``word1`` and ``word2`` (memoized recursion)."""
        self.word1 = word1
        self.word2 = word2
        # -1 marks "not computed yet" in the memo table.
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        """Return the edit distance between ``word1`` and ``word2`` (iterative DP)."""
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    # Interactive driver: read two strings and print the edit distance computed
    # by both strategies (they must agree).
    # Fix vs. previous revision: the two inputs collided on one name and the
    # f-strings referenced an undefined `Sa`; restored to two distinct strings.
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 64 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _a (__magic_name__ ):
    """Scheduler tests for IPNDMScheduler (save/load round-trips, step equality).

    NOTE(review): identifiers in this block were machine-obfuscated. Some
    signatures duplicate the parameter name ``A__`` (a SyntaxError as written)
    and several names (`config`, `kwargs`, `num_inference_steps`, `sample`,
    `residual`, `scheduler`, `time_step`, `dummy_past_residuals`, ...) are read
    without a visible binding — reconcile with the upstream diffusers test
    before running.
    """

    UpperCAmelCase__: str = (IPNDMScheduler,)
    UpperCAmelCase__: Union[str, Any] = (('''num_inference_steps''', 50),)

    def __A ( self , **A__ ):
        """Return the default scheduler config, updated with any overrides."""
        A__ : str = {"""num_train_timesteps""": 1000}
        config.update(**A__ )
        return config

    def __A ( self , A__=0 , **A__ ):
        """Check that saving and re-loading the config preserves step outputs."""
        A__ : Optional[Any] = dict(self.forward_default_kwargs )
        A__ : str = kwargs.pop("""num_inference_steps""" , A__ )
        A__ : Optional[int] = self.dummy_sample
        A__ : List[str] = 0.1 * sample
        A__ : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            A__ : Union[str, Any] = self.get_scheduler_config(**A__ )
            A__ : Optional[Any] = scheduler_class(**A__ )
            scheduler.set_timesteps(A__ )
            # copy over dummy past residuals
            A__ : Dict = dummy_past_residuals[:]
            if time_step is None:
                A__ : Dict = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__ )
                A__ : Dict = scheduler_class.from_pretrained(A__ )
                new_scheduler.set_timesteps(A__ )
                # copy over dummy past residuals
                A__ : List[Any] = dummy_past_residuals[:]
            A__ : Dict = scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            A__ : int = new_scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            A__ : Optional[Any] = scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            A__ : Tuple = new_scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __A ( self ):
        # Intentionally skipped for this scheduler.
        pass

    def __A ( self , A__=0 , **A__ ):
        """Check that saving and re-loading the scheduler preserves step outputs."""
        A__ : Union[str, Any] = dict(self.forward_default_kwargs )
        A__ : int = kwargs.pop("""num_inference_steps""" , A__ )
        A__ : Optional[int] = self.dummy_sample
        A__ : str = 0.1 * sample
        A__ : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            A__ : str = self.get_scheduler_config()
            A__ : List[Any] = scheduler_class(**A__ )
            scheduler.set_timesteps(A__ )
            # copy over dummy past residuals (must be after setting timesteps)
            A__ : Union[str, Any] = dummy_past_residuals[:]
            if time_step is None:
                A__ : Any = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(A__ )
                A__ : List[str] = scheduler_class.from_pretrained(A__ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(A__ )
                # copy over dummy past residual (must be after setting timesteps)
                A__ : Any = dummy_past_residuals[:]
            A__ : Dict = scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            A__ : List[str] = new_scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            A__ : Dict = scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            A__ : Union[str, Any] = new_scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __A ( self , **A__ ):
        """Run a full 10-step denoising loop twice and return the final sample."""
        A__ : Any = self.scheduler_classes[0]
        A__ : List[Any] = self.get_scheduler_config(**A__ )
        A__ : Optional[Any] = scheduler_class(**A__ )
        A__ : Union[str, Any] = 10
        A__ : int = self.dummy_model()
        A__ : Tuple = self.dummy_sample_deter
        scheduler.set_timesteps(A__ )
        for i, t in enumerate(scheduler.timesteps ):
            A__ : Union[str, Any] = model(A__ , A__ )
            A__ : Any = scheduler.step(A__ , A__ , A__ ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            A__ : Tuple = model(A__ , A__ )
            A__ : Optional[int] = scheduler.step(A__ , A__ , A__ ).prev_sample
        return sample

    def __A ( self ):
        """Step through two consecutive timesteps; shapes must stay consistent."""
        A__ : Optional[Any] = dict(self.forward_default_kwargs )
        A__ : Any = kwargs.pop("""num_inference_steps""" , A__ )
        for scheduler_class in self.scheduler_classes:
            A__ : Dict = self.get_scheduler_config()
            A__ : Union[str, Any] = scheduler_class(**A__ )
            A__ : str = self.dummy_sample
            A__ : int = 0.1 * sample
            if num_inference_steps is not None and hasattr(A__ , """set_timesteps""" ):
                scheduler.set_timesteps(A__ )
            elif num_inference_steps is not None and not hasattr(A__ , """set_timesteps""" ):
                A__ : int = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            A__ : int = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            A__ : int = dummy_past_residuals[:]
            A__ : str = scheduler.timesteps[5]
            A__ : List[str] = scheduler.timesteps[6]
            A__ : List[str] = scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            A__ : Union[str, Any] = scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            A__ : str = scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            A__ : Dict = scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def __A ( self ):
        """Sweep the training-timestep count."""
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=A__ , time_step=A__ )

    def __A ( self ):
        """Sweep (time_step, num_inference_steps) pairs."""
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=A__ , time_step=A__ )

    def __A ( self ):
        """Full-loop numerical regression: mean of the final sample must be stable."""
        A__ : Any = self.full_loop()
        A__ : int = torch.mean(torch.abs(A__ ) )
        assert abs(result_mean.item() - 254_0529 ) < 10
| 64 |
def UpperCamelCase(first: int, second: int) -> int:
    """Return the sum of two non-negative integers using only bitwise operations.

    XOR adds without carrying; AND finds the carry bits, which are shifted
    left and re-added until no carry remains.

    Fixes vs. the previous revision: the carry was stored under an obfuscated
    name while the next line read an undefined ``c``, and ``second`` was never
    reassigned (an infinite loop for any non-zero ``second``).

    :param first: first addend (non-negative; Python's unbounded ints make
        negative inputs loop indefinitely)
    :param second: second addend (non-negative)
    :return: ``first + second``
    """
    while second != 0:
        carry = first & second  # bit positions that overflow
        first ^= second         # sum without the carry
        second = carry << 1     # propagate the carry one position left
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    # Previous revision called an undefined `add` here.
    print(f"{UpperCamelCase(first, second) = }")
| 64 | 1 |
def UpperCamelCase(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid: c = sqrt(K / rho).

    Fixes vs. the previous revision: both parameters were obfuscated to the
    same name ``lowercase_`` (a SyntaxError) while the body read ``density``
    and ``bulk_modulus``; the signature is restored to match the body.

    :param density: fluid density rho (must be > 0), e.g. kg/m^3
    :param bulk_modulus: bulk modulus K (must be > 0), e.g. Pa
    :return: speed of sound in the same unit system
    :raises ValueError: if either argument is non-positive
    """
    if density <= 0:
        raise ValueError("""Impossible fluid density""")
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def solve(matrix: "Matrix", vector: "Matrix") -> "Matrix":
    """Solve the linear system ``matrix @ x = vector`` by Gaussian elimination.

    Uses partial pivoting and back substitution; returns the solution as a
    column matrix of floats rounded to 10 decimal places.

    Fixes vs. the previous revision: both parameters were obfuscated to the
    same name ``lowercase_`` (a SyntaxError) while the body read ``matrix``
    and ``vector``, several range bounds were obfuscated away, and the
    function name is restored to ``solve`` to match its call site in
    ``interpolate``.

    :param matrix: square coefficient matrix (size x size)
    :param vector: column matrix (size x 1) of right-hand sides
    :return: column matrix (size x 1) with the solution
    """
    size = len(matrix)
    # Build the augmented matrix [matrix | vector].
    augmented: "Matrix" = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: pick the row (at or below `row`) with the largest
        # absolute value in the current column to limit rounding error.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            # Whole column is zero below the pivot: move to the next column.
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # Eliminate the current column from all rows below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # Back substitution: clear entries above each pivot.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(data_points: "list[int]") -> "Callable[[int], int]":
    """Return the polynomial of minimal degree through the given points.

    The k-th data point is taken at x = k + 1; a Vandermonde system is built
    and solved with ``solve``, and the returned closure evaluates the fitted
    polynomial at integer arguments (coefficients rounded to integers).

    Fixes vs. the previous revision: the single obfuscated parameter name was
    reused for the matrix size and the call to ``solve`` (which was itself
    misnamed); names are restored from the body's own usage
    (``matrix``, ``vector``, ``coeffs``, ``interpolated_func``).

    :param data_points: y-values at x = 1, 2, ..., len(data_points)
    :return: a function evaluating the interpolating polynomial
    """
    size = len(data_points)
    matrix: "Matrix" = [[0 for _ in range(size)] for _ in range(size)]
    vector: "Matrix" = [[0] for _ in range(size)]

    # Vandermonde rows: [(x)^(size-1), ..., x, 1] for x = 1..size.
    for x_val, y_val in enumerate(data_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        # Evaluate sum(c_i * var^(size-i-1)) with integer-rounded coefficients.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """Return u(n) = 1 - n + n^2 - n^3 + ... + n^10 (Project Euler 101).

    Fixes vs. the previous revision: the parameter was obfuscated to
    ``lowercase_`` while the body read ``variable``, and the function name is
    restored to ``question_function`` to match the default argument of
    ``solution``.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: "Callable[[int], int]" = question_function, order: int = 10) -> int:
    """Return the sum of the first incorrect terms (FITs) for Project Euler 101.

    For each optimal polynomial fitted through the first k generated terms
    (k = 1..order), find the first x where the fit disagrees with ``func``
    and accumulate that (incorrect) predicted value.

    Fixes vs. the previous revision: both parameters were obfuscated to the
    same name ``lowercase_`` (a SyntaxError) while the body read ``func``,
    ``order`` and friends; names are restored from the body's own usage.

    :param func: generating function producing the true sequence
    :param order: number of leading terms / fitted polynomials to consider
    :return: sum of the first incorrect terms over all partial fits
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret = 0
    for poly in polynomials:
        # Walk forward until the fit first disagrees with the true sequence.
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
    # Project Euler 101 driver: print the sum of the first incorrect terms.
    print(f'''{solution() = }''')
| 64 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _a :
    """Immutable (pytree) state carried between Flax DDPM scheduler calls.

    NOTE(review): the field names below were machine-obfuscated and collide on
    ``UpperCAmelCase__``; upstream these are distinct fields
    (common, init_noise_sigma, timesteps, num_inference_steps) — confirm.
    """

    UpperCAmelCase__: CommonSchedulerState
    # setable values
    UpperCAmelCase__: jnp.ndarray
    UpperCAmelCase__: jnp.ndarray
    UpperCAmelCase__: Optional[int] = None

    @classmethod
    def __A ( cls , A__ , A__ , A__ ):
        """Construct a state from the common state, initial sigma, and timesteps."""
        return cls(common=A__ , init_noise_sigma=A__ , timesteps=A__ )
@dataclass
class _a (__magic_name__ ):
    """Output wrapper for the Flax DDPM scheduler's ``step``: carries the
    denoised previous sample plus the updated scheduler state."""

    UpperCAmelCase__: DDPMSchedulerState
class _a (__magic_name__ , __magic_name__ ):
    """Flax/JAX implementation of the DDPM denoising scheduler.

    NOTE(review): identifiers in this block were machine-obfuscated. The
    ``__init__`` signature duplicates the parameter name ``A__`` (a
    SyntaxError as written), and several names (`common`, `variance`,
    `max_log`, `min_log`, `t`, `sample`, ...) are read without a visible
    binding under that name — reconcile with the upstream diffusers
    ``FlaxDDPMScheduler`` before running.
    """

    UpperCAmelCase__: int = [e.name for e in FlaxKarrasDiffusionSchedulers]
    UpperCAmelCase__: jnp.dtype

    @property
    def __A ( self ):
        """This scheduler's state can be created without extra inputs."""
        return True

    @register_to_config
    def __init__( self , A__ = 1000 , A__ = 0.0_0_0_1 , A__ = 0.0_2 , A__ = "linear" , A__ = None , A__ = "fixed_small" , A__ = True , A__ = "epsilon" , A__ = jnp.floataa , ):
        # Config values are captured by @register_to_config; only dtype is kept
        # directly on the instance.
        A__ : Union[str, Any] = dtype

    def __A ( self , A__ = None ):
        """Create the initial scheduler state (shared betas/alphas + timesteps)."""
        if common is None:
            A__ : List[str] = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        A__ : Optional[Any] = jnp.array(1.0 , dtype=self.dtype )
        # Reversed training timesteps: denoising runs from T-1 down to 0.
        A__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=A__ , init_noise_sigma=A__ , timesteps=A__ , )

    def __A ( self , A__ , A__ , A__ = None ):
        """DDPM needs no input scaling; return the sample unchanged."""
        return sample

    def __A ( self , A__ , A__ , A__ = () ):
        """Set the discrete timesteps used for inference (evenly strided)."""
        A__ : Tuple = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        A__ : str = (jnp.arange(0 , A__ ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=A__ , timesteps=A__ , )

    def __A ( self , A__ , A__ , A__=None , A__=None ):
        """Compute the per-step variance according to ``config.variance_type``."""
        A__ : Dict = state.common.alphas_cumprod[t]
        A__ : List[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        A__ : Optional[int] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            A__ : int = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            A__ : Union[str, Any] = jnp.clip(A__ , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            A__ : Optional[int] = jnp.log(jnp.clip(A__ , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            A__ : Dict = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            A__ : Optional[int] = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # NOTE(review): `max_log` / `min_log` are read below but only
            # obfuscated names are assigned above — confirm against upstream,
            # where they are the log of betas[t] and of the clipped variance.
            A__ : Any = variance
            A__ : Optional[Any] = state.common.betas[t]
            A__ : Dict = (predicted_variance + 1) / 2
            A__ : Any = frac * max_log + (1 - frac) * min_log
        return variance

    def __A ( self , A__ , A__ , A__ , A__ , A__ = None , A__ = True , ):
        """One reverse-diffusion step: predict x_0, form the posterior mean, add noise."""
        A__ : List[str] = timestep
        if key is None:
            A__ : Optional[int] = jax.random.PRNGKey(0 )
        # Models trained to predict variance emit 2x channels; split them apart.
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            A__ , A__ : Tuple = jnp.split(A__ , sample.shape[1] , axis=1 )
        else:
            A__ : Union[str, Any] = None
        # 1. compute alphas, betas
        A__ : Dict = state.common.alphas_cumprod[t]
        A__ : Dict = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        A__ : List[Any] = 1 - alpha_prod_t
        A__ : List[str] = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            A__ : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            A__ : List[Any] = model_output
        elif self.config.prediction_type == "v_prediction":
            A__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                """ for the FlaxDDPMScheduler.""" )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            A__ : List[str] = jnp.clip(A__ , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        A__ : Optional[int] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        A__ : int = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        A__ : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            A__ : str = jax.random.split(A__ , num=1 )
            A__ : List[Any] = jax.random.normal(A__ , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(A__ , A__ , predicted_variance=A__ ) ** 0.5) * noise
        # No noise is added at the final step (t == 0).
        A__ : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        A__ : Optional[Any] = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=A__ , state=A__ )

    def __A ( self , A__ , A__ , A__ , A__ , ):
        """Forward-diffuse clean samples to the given timesteps (shared helper)."""
        return add_noise_common(state.common , A__ , A__ , A__ )

    def __A ( self , A__ , A__ , A__ , A__ , ):
        """Compute the velocity target used for v-prediction training."""
        return get_velocity_common(state.common , A__ , A__ , A__ )

    def __len__( self ):
        # Length of the scheduler == number of training timesteps.
        return self.config.num_train_timesteps
| 64 |
from functools import lru_cache


@lru_cache
def UpperCamelCase(num: int) -> int:
    """Return ``num`` factorial (num!), memoized with ``lru_cache``.

    Fixes vs. the previous revision: the body read an undefined ``num`` (the
    parameter was obfuscated to ``lowercase_``) and recursed into an undefined
    ``factorial``; the recursion now goes through this function's own cached
    wrapper, so intermediate results are memoized.

    :param num: non-negative integer
    :raises ValueError: if ``num`` is negative
    """
    if num < 0:
        raise ValueError("""Number should not be negative.""")
    return 1 if num in (0, 1) else num * UpperCamelCase(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
A_ : int = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract selected warnings from one downloaded CI artifact.

    Depending on the module-level ``from_gh`` flag, ``artifact_path`` is either
    a directory of files (GitHub Actions download) or a ``.zip`` archive; in
    both cases only ``warnings.txt`` entries are parsed. A warning block is
    kept when any target name appears in it as ``": <target>: "``.

    Fixes vs. the previous revision: both parameters were obfuscated to the
    same name ``lowercase_`` (a SyntaxError) while the body read
    ``artifact_path`` and ``targets``; the function name is restored to match
    its call site in the batch extractor.

    :param artifact_path: directory or zip file containing ``warnings.txt``
    :param targets: iterable of warning-category names to keep
    :return: set of selected multi-line warning strings
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Accumulate indented continuation lines into `buffer`; flush a
        # complete warning whenever a non-indented line is reached.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f""": {x}: """ in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    # NOTE(review): `from_gh` and `logger` are module-level names; `from_gh`
    # is set in the (truncated) __main__ block — confirm it is bound before
    # this function is called.
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."""
            )

    return selected_warnings
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: Optional[Any] ) -> int:
A__ : Dict = set()
A__ : Tuple = [os.path.join(lowercase_ , lowercase_ ) for p in os.listdir(lowercase_ ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(lowercase_ , lowercase_ ) )
return selected_warnings
if __name__ == "__main__":
def UpperCamelCase (lowercase_: Tuple ) -> Dict:
return values.split(""",""" )
A_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
A_ : str = parser.parse_args()
A_ : Tuple = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
A_ : List[Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
A_ : int = extract_warnings(args.output_dir, args.targets)
A_ : str = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 64 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
'''simple docstring'''
def __A ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )
def __A ( self , A__ , A__ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
def __A ( self , A__ , A__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
'''simple docstring'''
def __A ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )
def __A ( self , A__ , A__ ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
]
def __A ( self , A__ , A__ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase () -> Dict:
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def UpperCamelCase () -> Tuple:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class _a (__magic_name__ ):
'''simple docstring'''
@require_beam
def __A ( self ):
A__ : Dict = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
A__ : int = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __A ( self ):
import apache_beam as beam
A__ : int = beam.io.parquetio.WriteToParquet
A__ : List[str] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
A__ : Optional[Any] = partial(A__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
A__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __A ( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : int = DummyBeamDataset(cache_dir=A__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __A ( self ):
A__ : List[Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
A__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , A__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
| 64 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A_ : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a (datasets.BuilderConfig ):
'''simple docstring'''
UpperCAmelCase__: Optional[datasets.Features] = None
UpperCAmelCase__: str = "utf-8"
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: Optional[str] = None
UpperCAmelCase__: bool = True # deprecated
UpperCAmelCase__: Optional[int] = None # deprecated
UpperCAmelCase__: int = 10 << 20 # 10MB
UpperCAmelCase__: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
'''simple docstring'''
UpperCAmelCase__: List[str] = JsonConfig
def __A ( self ):
if self.config.block_size is not None:
logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
A__ : Union[str, Any] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"""The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
if self.config.newlines_in_values is not None:
raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , A__ ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
A__ : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(A__ , (str, list, tuple) ):
A__ : Optional[Any] = data_files
if isinstance(A__ , A__ ):
A__ : List[str] = [files]
A__ : int = [dl_manager.iter_files(A__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
A__ : List[str] = []
for split_name, files in data_files.items():
if isinstance(A__ , A__ ):
A__ : Optional[int] = [files]
A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , A__ ):
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
return pa_table
def __A ( self , A__ ):
for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
# We keep only the field we are interested in
A__ : Optional[int] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(A__ , (list, tuple) ):
A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
else:
A__ : Any = dataset
A__ : Any = pa.Table.from_pydict(A__ )
yield file_idx, self._cast_table(A__ )
# If the file has one json object per line
else:
with open(A__ , """rb""" ) as f:
A__ : List[str] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
A__ : Any = (
self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
)
while True:
A__ : Dict = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(A__ )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
try:
while True:
try:
A__ : str = paj.read_json(
io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(A__ , pa.ArrowInvalid )
and "straddling" not in str(A__ )
or block_size > len(A__ )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A__ : Optional[Any] = json.load(A__ )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
try:
A__ : str = set().union(*[row.keys() for row in dataset] )
A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
A__ : int = pa.Table.from_pydict(A__ )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(A__ )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(A__ )
batch_idx += 1
| 64 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
'''simple docstring'''
def __init__( self , *A__ , **A__ ):
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , A__ , )
super().__init__(*A__ , **A__ )
| 64 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 64 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase (lowercase_: Optional[Any]=2 , lowercase_: Union[str, Any]=3 , lowercase_: int=16 , lowercase_: int = 10 , lowercase_: int = 2 ) -> int:
def get_dataset(lowercase_: Optional[int] ):
A__ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowercase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
A__ : Dict = get_dataset(lowercase_ )
A__ : Any = get_dataset(lowercase_ )
A__ : Dict = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
A__ : Optional[Any] = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: List[str] , lowercase_: int , lowercase_: int , lowercase_: List[str] , lowercase_: Dict=None ) -> List[Any]:
A__ : List[Any] = []
for epoch in range(lowercase_ ):
# Train quickly
model.train()
for batch in dataloader:
A__ , A__ : Any = batch
A__ : Any = model(lowercase_ )
A__ : Any = torch.nn.functional.mse_loss(lowercase_ , lowercase_ )
accelerator.backward(lowercase_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : str = dummy_dataloaders()
A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
# Train baseline
A__ : List[str] = Accelerator(project_config=A__ )
A__ , A__ , A__ , A__ : Any = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : str = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : int = dummy_dataloaders()
# Train baseline
A__ : str = Accelerator()
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
A__ : List[Any] = os.path.join(A__ , """initial""" )
accelerator.save_state(A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Dict = optimizer.state_dict()
A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[int] = DummyModel()
A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Dict = dummy_dataloaders()
A__ : List[str] = Accelerator()
A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(A__ )
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
# Save everything
A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
accelerator.save_state(A__ )
# Load everything back in and make sure all states work
accelerator.load_state(A__ )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
A__ : Optional[int] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : int = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : List[str] = dummy_dataloaders()
A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : str = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : int = optimizer.state_dict()
A__ : int = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Dict = DummyModel()
A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Union[str, Any] = dummy_dataloaders()
A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : Tuple = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : str = train(2 , A__ , A__ , A__ , A__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : List[Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
A__ : int = torch.tensor([2, 3, 4] )
A__ : List[Any] = DummyModel()
A__ : List[Any] = torch.optim.Adam(net.parameters() )
A__ : Tuple = Accelerator()
with self.assertRaises(A__ ) as ve:
accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
A__ : Any = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Any = DummyModel()
A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
A__ , A__ : List[Any] = dummy_dataloaders()
A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
A__ : Tuple = scheduler.state_dict()
train(3 , A__ , A__ , A__ , A__ , A__ )
self.assertNotEqual(A__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(A__ , scheduler.state_dict() )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
# Train baseline
A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
A__ : Union[str, Any] = accelerator.prepare(A__ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __A ( self ):
A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
A_ : List[str] = '/tmp/accelerate/state_checkpointing'
A_ : Optional[Any] = DummyModel()
A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[Any] = dummy_dataloaders()
A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A_ , A_ : Dict = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert param_device.type == accelerator.device.type
A_ : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
A_ : Tuple = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 64 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : Tuple = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Tuple = '''mvp'''
UpperCAmelCase__: Dict = ['''past_key_values''']
UpperCAmelCase__: Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , A__=5_0267 , A__=1024 , A__=12 , A__=4096 , A__=16 , A__=12 , A__=4096 , A__=16 , A__=0.0 , A__=0.0 , A__="gelu" , A__=1024 , A__=0.1 , A__=0.0 , A__=0.0 , A__=0.0_2 , A__=0.0 , A__=False , A__=True , A__=1 , A__=0 , A__=2 , A__=True , A__=2 , A__=2 , A__=False , A__=100 , A__=800 , **A__ , ):
A__ : int = vocab_size
A__ : Optional[Any] = max_position_embeddings
A__ : Union[str, Any] = d_model
A__ : int = encoder_ffn_dim
A__ : List[Any] = encoder_layers
A__ : Tuple = encoder_attention_heads
A__ : Tuple = decoder_ffn_dim
A__ : Optional[Any] = decoder_layers
A__ : List[Any] = decoder_attention_heads
A__ : Union[str, Any] = dropout
A__ : List[Any] = attention_dropout
A__ : Optional[Any] = activation_dropout
A__ : List[str] = activation_function
A__ : List[str] = init_std
A__ : Optional[int] = encoder_layerdrop
A__ : str = decoder_layerdrop
A__ : Dict = classifier_dropout
A__ : Tuple = use_cache
A__ : List[str] = encoder_layers
A__ : str = scale_embedding # scale factor will be sqrt(d_model) if True
A__ : Dict = use_prompt
A__ : Optional[int] = prompt_length
A__ : int = prompt_mid_dim
super().__init__(
pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , is_encoder_decoder=A__ , decoder_start_token_id=A__ , forced_eos_token_id=A__ , **A__ , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , A__ ):
A__ : str = self.bos_token_id
warnings.warn(
F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
"""The config can simply be saved and uploaded again to be fixed.""" )
| 64 |
def UpperCamelCase (lowercase_: str , lowercase_: str ) -> bool:
A__ : Union[str, Any] = len(lowercase_ )
A__ : List[Any] = len(lowercase_ )
A__ : List[Any] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
A__ : str = True
for i in range(lowercase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A__ : int = True
if a[i].islower():
A__ : Dict = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
from ..utils import DummyObject, requires_backends
class _a (metaclass=__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Dict = ['''onnx''']
def __init__( self , *A__ , **A__ ):
requires_backends(self , ["""onnx"""] )
@classmethod
def __A ( cls , *A__ , **A__ ):
requires_backends(cls , ["""onnx"""] )
@classmethod
def __A ( cls , *A__ , **A__ ):
requires_backends(cls , ["""onnx"""] )
| 64 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
def UpperCamelCase (lowercase_: Tuple , lowercase_: Tuple=1.0 , lowercase_: Dict=None , lowercase_: int=None ) -> str:
if rng is None:
A__ : Optional[Any] = global_rng
A__ : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
A__ : Any = parent
A__ : Optional[int] = batch_size
A__ : Union[str, Any] = min_seq_length
A__ : Dict = max_seq_length
A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : str = feature_size
A__ : Optional[int] = padding_value
A__ : List[str] = sampling_rate
A__ : List[str] = return_attention_mask
A__ : int = do_normalize
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
    """Unit tests for ``ASTFeatureExtractor``.

    NOTE(review): this block is machine-mangled — the mixin base
    ``__magic_name__`` and the names ``ASTFeatureExtractionTester`` /
    ``floats_list`` are undefined in this file, and every local binding was
    renamed to ``A__`` while later reads keep the original names
    (``feat_extract``, ``speech_inputs``, ``np_speech_inputs`` …), so the
    methods raise NameError as written. Restore the original bindings
    before running.
    """
    # Extractor class exercised by the shared mixin tests
    # (mangled attribute name; presumably `feature_extraction_class` — confirm).
    UpperCAmelCase__: int = ASTFeatureExtractor
    def __A ( self ):
        # setUp: builds the tester fixture (binding lost to the mangling).
        A__ : Optional[Any] = ASTFeatureExtractionTester(self )
    def __A ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
        # Test not batched input
        A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test batched
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        A__ : List[str] = np.asarray(A__ )
        A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
    @require_torch
    def __A ( self ):
        # Checks dtype round-tripping through `pad` for numpy vs torch output.
        # NOTE(review): `np.floataa` / `torch.floataa` look like mangled
        # `float64` / `float32` — confirm against the upstream test.
        import torch
        A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
        A__ : Tuple = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def __A ( self , A__ ):
        # Helper: load `num_samples` decoded waveforms from the dummy
        # LibriSpeech validation split (network access required).
        from datasets import load_dataset
        A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    @require_torch
    def __A ( self ):
        # Integration test: the first 30 log-mel values of the first sample
        # must match this reference slice within 1e-4.
        # fmt: off
        A__ : Optional[Any] = torch.tensor(
            [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
            -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
            -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
            -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
        # fmt: on
        A__ : Any = self._load_datasamples(1 )
        A__ : Tuple = ASTFeatureExtractor()
        A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 1 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase (img: np.ndarray , pts1: np.ndarray , pts2: np.ndarray , rows: int , cols: int ) -> np.ndarray:
    """Warp ``img`` with the affine transform mapping ``pts1`` onto ``pts2``.

    Args:
        img: source image array.
        pts1: three source points, float32 array of shape ``(3, 2)``.
        pts2: the corresponding destination points.
        rows: output size, first component passed to ``warpAffine``.
        cols: output size, second component passed to ``warpAffine``.

    Returns:
        The warped image.
    """
    # The mangled original computed the affine matrix but then passed the
    # image in its place to warpAffine; the matrix is used here.
    matrix = cva.getAffineTransform(pts1 , pts2 )
    return cva.warpAffine(img , matrix , (rows, cols) )
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image (the mangled original bound all
    # four arrays to one name, `A_`; np.floataa restored to np.float32)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list; `UpperCamelCase` is this file's
    # mangled name for get_rotation. NOTE(review): the point-set pairing was
    # lost to the mangling and is reconstructed here — confirm upstream.
    images = [
        gray_img,
        UpperCamelCase(gray_img, pts1, pts2, img_rows, img_cols),
        UpperCamelCase(gray_img, pts2, pts3, img_rows, img_cols),
        UpperCamelCase(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 64 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
    """GPT-2-based, prefix-conditioned text decoder (caption generator).

    NOTE(review): machine-mangled — the three ``__magic_name__`` bases are
    undefined (the visible imports suggest ModelMixin / ConfigMixin /
    ModuleUtilsMixin), every ``def`` duplicates the parameter name ``A__``
    (a SyntaxError), and locals were rebound to ``A__`` while later reads
    keep the original names (``prefix_embeds``, ``tokens``, ``scores`` …).
    Restore the original bindings before running.
    """
    # Regexes of attention-bias buffers to skip on state-dict load
    # (mangled field name; presumably `_keys_to_ignore_on_load_unexpected` — confirm).
    UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
    # __init__: builds optional prefix encode/decode projections around a
    # freshly-configured GPT-2 LM head.
    @register_to_config
    def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
        super().__init__()
        A__ : Union[str, Any] = prefix_length
        # A hidden projection is mandatory whenever the prefix width differs
        # from the transformer embedding width.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
                F""" `n_embd`: {n_embd} are not equal.""" )
        A__ : str = prefix_inner_dim
        A__ : Optional[Any] = prefix_hidden_dim
        A__ : Tuple = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        A__ : int = (
            nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        A__ : Tuple = GPTaConfig(
            vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
        A__ : int = GPTaLMHeadModel(A__ )
    # forward: prepend the (encoded/decoded) prefix to the token embeddings
    # and run the LM, optionally with dummy-padded labels for the prefix.
    def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
        A__ : List[str] = self.transformer.transformer.wte(A__ )
        A__ : int = self.encode_prefix(A__ )
        A__ : int = self.decode_prefix(A__ )
        A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            # Zero tokens cover the prefix positions so label/logit lengths match.
            A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    # get_dummy_token: zeros of shape (batch, prefix_length).
    # NOTE(review): `torch.intaa` looks like a mangled `torch.int64` — confirm.
    def __A ( self , A__ , A__ ):
        return torch.zeros(A__ , self.prefix_length , dtype=torch.intaa , device=A__ )
    # encode: project a feature into the prefix space.
    def __A ( self , A__ ):
        return self.encode_prefix(A__ )
    # generate_captions: beam-search one caption per feature row.
    @torch.no_grad()
    def __A ( self , A__ , A__ , A__ ):
        A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
        A__ : Optional[int] = []
        A__ : str = []
        for feature in features:
            A__ : Dict = self.decode_prefix(feature.to(A__ ) )  # back to the clip feature
            # Only support beam search for now
            A__ , A__ : Union[str, Any] = self.generate_beam(
                input_embeds=A__ , device=A__ , eos_token_id=A__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : int = torch.stack(A__ )
        A__ : List[Any] = torch.stack(A__ )
        return generated_tokens, generated_seq_lengths
    # generate_beam: classic length-normalised beam search over the LM.
    @torch.no_grad()
    def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
        A__ : Any = eos_token_id
        A__ : Any = None
        A__ : Optional[int] = None
        A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
        A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
        if input_embeds is not None:
            A__ : Dict = input_embeds
        else:
            A__ : str = self.transformer.transformer.wte(A__ )
        for i in range(A__ ):
            A__ : Dict = self.transformer(inputs_embeds=A__ )
            A__ : str = outputs.logits
            # Temperature-scaled log-probabilities of the next token.
            A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Any = logits.softmax(-1 ).log()
            if scores is None:
                # First step: seed the beam with the top-k continuations.
                A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
                A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
                A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : Optional[Any] = next_tokens
                else:
                    A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
                    A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Later steps: mask finished beams, re-rank by mean log-prob.
                A__ : Optional[int] = -float(np.inf )
                A__ : List[Any] = 0
                A__ : str = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Dict = scores_sum / seq_lengths[:, None]
                A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
                A__ : Tuple = next_tokens // scores_sum.shape[1]
                A__ : Optional[Any] = seq_lengths[next_tokens_source]
                A__ : List[str] = next_tokens % scores_sum.shape[1]
                A__ : Optional[int] = next_tokens.unsqueeze(1 )
                A__ : int = tokens[next_tokens_source]
                A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
                A__ : str = generated[next_tokens_source]
                A__ : Optional[Any] = scores_sum_average * seq_lengths
                A__ : Union[str, Any] = is_stopped[next_tokens_source]
            A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
            A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
            if is_stopped.all():
                break
        # Sort beams by length-normalised score, best first.
        A__ : Dict = scores / seq_lengths
        A__ : Dict = scores.argsort(descending=A__ )
        # tokens tensors are already padded to max_seq_length
        A__ : Union[str, Any] = [tokens[i] for i in order]
        A__ : Any = torch.stack(A__ , dim=0 )
        A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 64 | 1 |
def UpperCamelCase (first: int , second: int ) -> int:
    """Add two non-negative integers using only bitwise operations.

    ``first & second`` yields the carry bits and ``first ^ second`` the
    carry-less partial sum; shifting the carries left and repeating until
    no carry remains implements ripple-carry addition.

    Note: with Python's unbounded ints this does not terminate for inputs
    whose carry chain never dies out (e.g. mixed-sign operands); use
    non-negative inputs only.

    >>> UpperCamelCase(3, 5)
    8
    """
    # Restored locals: the mangled original read an undefined `c` and never
    # updated `second`, looping forever.
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The mangled original bound both inputs to the single name `A_` and
    # called an undefined `add`; distinct names and this file's actual
    # adder (`UpperCamelCase`) are used instead.
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f'''{UpperCamelCase(first, second) = }''')
| 64 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
# Module-level logger. The builder class below logs via the name `logger`,
# but the mangled binding used `A_`; both names are kept.
A_ : Tuple = datasets.utils.logging.get_logger(__name__)
logger = A_
@dataclass
class _a (datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader.

    The mangled original gave every field the same name
    (``UpperCAmelCase__``), so later declarations clobbered earlier ones and
    the builder's ``self.config.<field>`` reads failed; the field names the
    builder actually uses are restored below.
    """

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
UpperCAmelCase__: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
    """Arrow-based builder that reads JSON / JSON-Lines files.

    NOTE(review): machine-mangled — every local binding was renamed to
    ``A__`` while later reads keep the original names (``files``,
    ``splits``, ``batch``, ``dataset`` …), and the module-level ``logger``
    it calls is bound as ``A_`` above, so the methods raise NameError as
    written. Restore the original bindings before running.
    """
    # Config class used by the datasets machinery (mangled field name;
    # presumably `BUILDER_CONFIG_CLASS` — confirm).
    UpperCAmelCase__: List[str] = JsonConfig
    # _info: validate deprecated config options and expose the features.
    def __A ( self ):
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            A__ : Union[str, Any] = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
        if self.config.newlines_in_values is not None:
            raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
        return datasets.DatasetInfo(features=self.config.features )
    # _split_generators: resolve `data_files` (single path/list or a
    # split-name -> files mapping) into SplitGenerators.
    def __A ( self , A__ ):
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        A__ : int = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(A__ , (str, list, tuple) ):
            A__ : Optional[Any] = data_files
            if isinstance(A__ , A__ ):
                A__ : List[str] = [files]
            A__ : int = [dl_manager.iter_files(A__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        A__ : List[str] = []
        for split_name, files in data_files.items():
            if isinstance(A__ , A__ ):
                A__ : Optional[int] = [files]
            A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
            splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
        return splits
    # _cast_table: add declared-but-missing columns as nulls, then cast the
    # table to the declared schema.
    def __A ( self , A__ ):
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
                A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
        return pa_table
    # _generate_tables: yield (key, pa.Table) pairs. Whole-document JSON
    # with `config.field` set is loaded via the json module; otherwise files
    # are streamed in chunks through pyarrow's JSON-Lines reader, growing
    # the block size on "straddling object" errors and falling back to the
    # json module for list-of-dicts documents.
    def __A ( self , A__ ):
        for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    A__ : Optional[Any] = json.load(A__ )
                # We keep only the field we are interested in
                A__ : Optional[int] = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(A__ , (list, tuple) ):
                    A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
                    A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
                else:
                    A__ : Any = dataset
                A__ : Any = pa.Table.from_pydict(A__ )
                yield file_idx, self._cast_table(A__ )
            # If the file has one json object per line
            else:
                with open(A__ , """rb""" ) as f:
                    A__ : List[str] = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
                    A__ : Any = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        A__ : Dict = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(A__ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
                        try:
                            while True:
                                try:
                                    A__ : str = paj.read_json(
                                        io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(A__ , pa.ArrowInvalid )
                                        and "straddling" not in str(A__ )
                                        or block_size > len(A__ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    A__ : Optional[Any] = json.load(A__ )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
                                try:
                                    A__ : str = set().union(*[row.keys() for row in dataset] )
                                    A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
                                    A__ : int = pa.Table.from_pydict(A__ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(A__ )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(A__ )
                        batch_idx += 1
| 64 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def UpperCamelCase (image ) -> "torch.Tensor":
    """Convert a PIL image to a float tensor in ``[-1, 1]``, shape (1, C, H, W).

    Width and height are snapped down to the nearest multiple of 32, which
    the UNet's down/up-sampling path requires.
    """
    # Restored locals: the mangled original bound everything to `A__` and
    # read undefined `w`/`h`/`image`; `np.floataa` restored to np.float32.
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h) )  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
    arr = np.array(image ).astype(np.float32 ) / 255.0
    arr = arr[None].transpose(0 , 3 , 1 , 2 )
    tensor = torch.from_numpy(arr )
    return 2.0 * tensor - 1.0
class _a (__magic_name__ ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    NOTE(review): machine-mangled — the base ``__magic_name__`` is undefined
    (the imports suggest DiffusionPipeline), the ``preprocess`` helper it
    calls is named ``UpperCamelCase`` in this file, and locals were rebound
    to ``A__`` while later reads keep the original names (``image``,
    ``latents``, ``timesteps_tensor`` …), so ``__call__`` raises NameError
    as written.
    """
    def __init__( self , A__ , A__ , A__ , ):
        # Registers the three sub-models (vqvae / unet / scheduler) so the
        # pipeline machinery can save and reload them.
        super().__init__()
        self.register_modules(vqvae=A__ , unet=A__ , scheduler=A__ )
    # __call__(image, batch_size=1, num_inference_steps=100, eta=0.0,
    # generator=None, output_type="pil", return_dict=True)
    @torch.no_grad()
    def __call__( self , A__ = None , A__ = 1 , A__ = 100 , A__ = 0.0 , A__ = None , A__ = "pil" , A__ = True , ):
        # Accept a single PIL image or a pre-batched tensor.
        if isinstance(A__ , PIL.Image.Image ):
            A__ : Any = 1
        elif isinstance(A__ , torch.Tensor ):
            A__ : Optional[int] = image.shape[0]
        else:
            raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(A__ )}""" )
        if isinstance(A__ , PIL.Image.Image ):
            A__ : Tuple = preprocess(A__ )
        A__ , A__ : int = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        A__ : List[str] = (batch_size, self.unet.config.in_channels // 2, height, width)
        A__ : Union[str, Any] = next(self.unet.parameters() ).dtype
        A__ : Any = randn_tensor(A__ , generator=A__ , device=self.device , dtype=A__ )
        A__ : Union[str, Any] = image.to(device=self.device , dtype=A__ )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(A__ , device=self.device )
        A__ : List[Any] = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        A__ : str = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        A__ : Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        A__ : str = {}
        if accepts_eta:
            A__ : Optional[int] = eta
        for t in self.progress_bar(A__ ):
            # concat latents and low resolution image in the channel dimension.
            A__ : Optional[int] = torch.cat([latents, image] , dim=1 )
            A__ : Optional[int] = self.scheduler.scale_model_input(A__ , A__ )
            # predict the noise residual
            A__ : Dict = self.unet(A__ , A__ ).sample
            # compute the previous noisy sample x_t -> x_t-1
            A__ : Optional[int] = self.scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
        # decode the image latents with the VQVAE
        A__ : List[str] = self.vqvae.decode(A__ ).sample
        A__ : Optional[Any] = torch.clamp(A__ , -1.0 , 1.0 )
        # Map from [-1, 1] back to [0, 1] and channel-last numpy.
        A__ : int = image / 2 + 0.5
        A__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            A__ : Tuple = self.numpy_to_pil(A__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=A__ )
| 64 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Lint the repository's file names. The mangled original bound every list
# to `A_` while the prints/filters read the real names; bindings restored.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Names containing uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f'''{len(upper_files)} files contain uppercase characters:''')
    print('\n'.join(upper_files) + '\n')

# Names containing spaces.
space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(f'''{len(space_files)} files contain space characters:''')
    print('\n'.join(space_files) + '\n')

# Names containing hyphens.
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f'''{len(hyphen_files)} files contain hyphen characters:''')
    print('\n'.join(hyphen_files) + '\n')

# Files sitting at the repository root (no directory component).
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f'''{len(nodir_files)} files are not in a directory:''')
    print('\n'.join(nodir_files) + '\n')

# Non-zero exit status when any offender exists so CI fails loudly.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 64 | 1 |
def UpperCamelCase (lowercase_: int | float | str ) -> tuple[int, int]:
try:
A__ : Any = float(lowercase_ )
except ValueError:
raise ValueError("""Please enter a valid number""" )
A__ : int = decimal - int(lowercase_ )
if fractional_part == 0:
return int(lowercase_ ), 1
else:
A__ : Optional[int] = len(str(lowercase_ ).split(""".""" )[1] )
A__ : Union[str, Any] = int(decimal * (10**number_of_frac_digits) )
A__ : int = 10**number_of_frac_digits
A__ , A__ : Union[str, Any] = denominator, numerator
while True:
A__ : str = dividend % divisor
if remainder == 0:
break
A__ , A__ : str = divisor, remainder
A__ , A__ : List[Any] = numerator / divisor, denominator / divisor
return int(lowercase_ ), int(lowercase_ )
if __name__ == "__main__":
    # Demo prints. The mangled original called an undefined
    # `decimal_to_fraction`; `UpperCamelCase` is that function's name here.
    print(f'''{UpperCamelCase(2) = }''')
    print(f'''{UpperCamelCase(89.0) = }''')
    print(f'''{UpperCamelCase("67") = }''')
    print(f'''{UpperCamelCase("45.0") = }''')
    print(f'''{UpperCamelCase(1.5) = }''')
    print(f'''{UpperCamelCase("6.25") = }''')
    print(f'''{UpperCamelCase("78td") = }''')  # raises ValueError (invalid input demo)
| 64 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCamelCase (*args , take_from: Optional[Union[Dict, Any]] = None , standard_warn: bool = True , stacklevel: int = 2 ) -> Dict:
    """Warn about (and optionally pop) deprecated arguments or attributes.

    Each positional argument is an ``(attribute, version_name, message)``
    triple. If the library version has already reached ``version_name`` a
    ValueError is raised (the deprecation shim should have been deleted).
    Otherwise a FutureWarning is emitted and, when ``take_from`` is a kwargs
    dict containing ``attribute``, the value is popped and returned.

    Returns:
        ``None``, a single popped value, or a tuple of popped values.

    Raises:
        ValueError: when a deprecation has expired for the current version.
        TypeError: when ``take_from`` still holds unknown keyword arguments.
    """
    # Restored locals: the mangled original duplicated keyword parameter
    # names (a SyntaxError) and read undefined `args`/`values`/`warning`.
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                f""" version {__version__} is >= {version_name}""" )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            # `standard_warn` controls whether the boilerplate sentence is
            # prepended to the caller-supplied message.
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        # Anything left in the kwargs dict was never a declared deprecation:
        # report it against the caller's frame like a normal TypeError would.
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 64 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module logger (mangled binding; not referenced in the visible portion of this file).
A_ : str = logging.get_logger(__name__)
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Tuple = '''linear'''
UpperCAmelCase__: Optional[Any] = '''cosine'''
UpperCAmelCase__: Union[str, Any] = '''cosine_with_restarts'''
UpperCAmelCase__: Optional[int] = '''polynomial'''
UpperCAmelCase__: Dict = '''constant'''
UpperCAmelCase__: Tuple = '''constant_with_warmup'''
UpperCAmelCase__: Tuple = '''piecewise_constant'''
def UpperCamelCase (optimizer: Optimizer , last_epoch: int = -1 ) -> LambdaLR:
    """Constant schedule: the learning-rate multiplier is always 1.

    Args:
        optimizer: the wrapped optimizer.
        last_epoch: index of the last epoch when resuming (-1 = fresh start).
    """
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def UpperCamelCase (optimizer: Optimizer , num_warmup_steps: int , last_epoch: int = -1 ) -> LambdaLR:
    """Linear warmup from 0 to the optimizer's lr, then constant.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: number of steps over which the lr ramps up.
        last_epoch: index of the last epoch when resuming (-1 = fresh start).
    """
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            # max(1.0, ...) guards against num_warmup_steps == 0.
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0

    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def UpperCamelCase (optimizer: Optimizer , step_rules: str , last_epoch: int = -1 ) -> LambdaLR:
    """Piecewise-constant lr multiplier parsed from a rule string.

    ``step_rules`` looks like ``"1:10,0.1:20,0.01"``: multiplier 1 until
    step 10, then 0.1 until step 20, then 0.01 for the rest of training.

    Args:
        optimizer: the wrapped optimizer.
        step_rules: comma-separated ``value:step`` pairs plus a final value.
        last_epoch: index of the last epoch when resuming (-1 = fresh start).
    """
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(""":""" )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    # Trailing entry has no step boundary: it applies forever after.
    last_lr_multiple = float(rule_list[-1] )

    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps: int ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def UpperCamelCase (optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """Linear warmup, then linear decay to 0 at ``num_training_steps``.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: steps over which the lr ramps from 0 to its value.
        num_training_steps: total steps; lr reaches 0 here.
        last_epoch: index of the last epoch when resuming (-1 = fresh start).
    """
    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        # Linear decay, clamped at 0 past num_training_steps.
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def UpperCamelCase (optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: float = 0.5 , last_epoch: int = -1 ) -> LambdaLR:
    """Linear warmup, then cosine decay over the remaining steps.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: steps over which the lr ramps from 0 to its value.
        num_training_steps: total steps used to compute decay progress.
        num_cycles: number of cosine half-waves (0.5 = decay to 0 once).
        last_epoch: index of the last epoch when resuming (-1 = fresh start).
    """
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def UpperCamelCase (optimizer: Optimizer , num_warmup_steps: int , num_training_steps: int , num_cycles: int = 1 , last_epoch: int = -1 ) -> LambdaLR:
    """Linear warmup, then cosine decay with hard restarts.

    The cosine restarts from 1 at the start of each of the ``num_cycles``
    cycles; the multiplier is 0 once ``num_training_steps`` is reached.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: steps over which the lr ramps from 0 to its value.
        num_training_steps: total steps used to compute decay progress.
        num_cycles: number of hard restarts.
        last_epoch: index of the last epoch when resuming (-1 = fresh start).
    """
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine at each cycle boundary.
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )

    return LambdaLR(optimizer , lr_lambda , last_epoch )
def UpperCamelCase (optimizer , num_warmup_steps , num_training_steps , lr_end=1e-7 , power=1.0 , last_epoch=-1 ):
    """Linear warmup, then polynomial decay from the initial lr to ``lr_end``.

    Args:
        optimizer: the wrapped optimizer (its ``defaults["lr"]`` is read).
        num_warmup_steps: steps over which the lr ramps from 0 to its value.
        num_training_steps: step at which the lr reaches ``lr_end``.
        lr_end: final learning rate (absolute, not a multiplier).
        power: exponent of the polynomial decay (1.0 = linear).
        last_epoch: index of the last epoch when resuming (-1 = fresh start).

    Raises:
        ValueError: if ``lr_end`` is not smaller than the initial lr.
    """
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )

    def lr_lambda(current_step: int ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer , lr_lambda , last_epoch )
# Dispatch table from scheduler type to its factory function.
# NOTE(review): machine-mangled — every factory above was renamed to
# `UpperCamelCase` (each redefinition shadowing the previous one) and the
# enum class is named `_a`, so all of the names referenced below are
# undefined in this file and evaluating this dict raises NameError until
# the original names are restored.
A_ : Dict = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def UpperCamelCase (lowercase_: Union[str, SchedulerType] , lowercase_: Optimizer , lowercase_: Optional[str] = None , lowercase_: Optional[int] = None , lowercase_: Optional[int] = None , lowercase_: int = 1 , lowercase_: float = 1.0 , lowercase_: int = -1 , ) -> str:
    """Unified scheduler factory: resolve a name/enum to the matching
    ``get_*_schedule*`` factory and call it with the arguments it needs.

    NOTE(review): machine-mangled — the signature duplicates the parameter
    name ``lowercase_`` (a SyntaxError), locals were rebound to ``A__``
    while the body reads ``name``/``schedule_func``, and ``SchedulerType``
    / ``TYPE_TO_SCHEDULER_FUNCTION`` are bound as ``_a`` / ``A_`` above.
    Restore the original bindings before running.
    """
    A__ : Dict = SchedulerType(lowercase_ )
    A__ : int = TYPE_TO_SCHEDULER_FUNCTION[name]
    # Constant-style schedules don't need warmup/training-step counts.
    if name == SchedulerType.CONSTANT:
        return schedule_func(lowercase_ , last_epoch=lowercase_ )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(lowercase_ , step_rules=lowercase_ , last_epoch=lowercase_ )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(lowercase_ , num_warmup_steps=lowercase_ , last_epoch=lowercase_ )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            lowercase_ , num_warmup_steps=lowercase_ , num_training_steps=lowercase_ , num_cycles=lowercase_ , last_epoch=lowercase_ , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            lowercase_ , num_warmup_steps=lowercase_ , num_training_steps=lowercase_ , power=lowercase_ , last_epoch=lowercase_ , )
    # Linear / cosine share the same keyword set.
    return schedule_func(
        lowercase_ , num_warmup_steps=lowercase_ , num_training_steps=lowercase_ , last_epoch=lowercase_ )
| 64 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCamelCase (flax_key_tuple , flax_tensor ):
    """Rename a Flax parameter key and convert its tensor for PyTorch.

    Expert (3-D) kernels are permuted to channel-last-compatible order,
    regular kernels are transposed, and ``kernel``/``scale``/``embedding``
    leaf names are rewritten to ``weight``.

    Args:
        flax_key_tuple: tuple of key components for one parameter.
        flax_tensor: the parameter tensor.

    Returns:
        The (possibly renamed) key tuple and the converted tensor.
    """
    # Restored locals: the mangled original rebound results to `A__` and
    # joined the undefined name `lowercase_` in the second condition.
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
    return flax_key_tuple, flax_tensor
def UpperCamelCase (lowercase_: Tuple , lowercase_: Optional[int] , lowercase_: str ) -> Union[str, Any]:
    # NOTE(review): `lowercase_` is declared three times (a SyntaxError) and the body
    # reads `layer`, `split_layer`, `switch_checkpoint_path`, `checkpoint_info`,
    # `curr_real_layer_name`, `content` that are never bound here -- identifiers look
    # machine-mangled. Confirm against the original conversion script.
    # Purpose: split one flattened checkpoint key into (real layer name, key-path
    # parts, content), resolving tensorstore "kvstore"/"metadata" entries to paths.
    if "metadata" in layer:
        A__ : Tuple = layer.split("""metadata""" )
        A__ : Optional[Any] = """""".join(split_layer[0] )[:-1]
        A__ : Optional[Any] = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
    elif "kvstore" in layer:
        A__ : str = layer.split("""kvstore""" )
        A__ : int = """""".join(split_layer[0] )[:-1]
        A__ : Optional[int] = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
    else:
        A__ : Any = layer.split("""/""" )
        A__ : int = """/""".join(split_layer[:-1] )
        A__ : str = (split_layer[-1],)
    if "kvstore/path" in layer:
        A__ : Dict = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        A__ : Optional[int] = """file"""
    else:
        A__ : str = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def UpperCamelCase (lowercase_: str , lowercase_: List[Any] ) -> int:
    # NOTE(review): duplicate `lowercase_` parameters (a SyntaxError); the body reads
    # `current_block` / `new_current_block`, never bound in this scope. Confirm upstream.
    # Purpose: rename every key of one state-dict shard via rename_keys() and
    # torch.save() the renamed shard to disk.
    A__ : int = rename_keys(lowercase_ )
    A__ : Any = {}
    for k, v in current_block.items():
        A__ : Dict = v
    A__ : str = new_current_block
    torch.save(lowercase_ , lowercase_ )
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[Any] , lowercase_: Optional[Any] , lowercase_: Optional[int] , lowercase_: str = WEIGHTS_NAME ) -> Tuple:
    # NOTE(review): the signature declares `lowercase_` five times (a SyntaxError) and
    # the body reads many names that are never bound here (switch_checkpoint_path,
    # checkpoint_info, all_layers, raw_weights, current_block, current_block_size,
    # weight_size, total_size, sharded_state_dicts, weights_name, max_shard_size,
    # shard_file, metadata, weight_map, index, ...) -- this matches a machine-mangled
    # shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype,
    # weights_name). Confirm upstream before relying on details below.
    # Purpose: stream a T5X/Flax Switch Transformers checkpoint out of tensorstore,
    # rename weights to the PyTorch scheme, and save them as size-capped .bin shards
    # plus a {"metadata", "weight_map"} index (single-shard case returns early).
    A__ : Optional[int] = convert_file_size_to_int(lowercase_ )
    A__ : List[Any] = []
    A__ : int = {}
    A__ : List[str] = 0
    A__ : Any = 0
    os.makedirs(lowercase_ , exist_ok=lowercase_ )
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
        A__ : Optional[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
        A__ : Dict = flatten_dict(lowercase_ , sep="""/""" )
    A__ : Any = {}
    for layer in checkpoint_info.keys():
        A__ , A__ , A__ : Union[str, Any] = get_key_and_tensorstore_dict(
            lowercase_ , lowercase_ , lowercase_ )
        if curr_real_layer_name in all_layers:
            A__ : Optional[int] = content
        else:
            A__ : List[Any] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        A__ : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        A__ : List[Any] = torch.tensor(lowercase_ )
        A__ : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        A__ , A__ : Any = rename_base_flax_keys(tuple(key.split("""/""" ) ) , lowercase_ )
        A__ : Any = """/""".join(lowercase_ )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            A__ : List[Any] = os.path.join(
                lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(lowercase_ , lowercase_ )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            A__ : Any = {}
            A__ : str = 0
        A__ : List[str] = raw_weights.to(getattr(lowercase_ , lowercase_ ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    A__ : Union[str, Any] = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(lowercase_ , lowercase_ )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(lowercase_ ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    A__ : str = {}
    A__ : Any = {}
    for idx, shard in enumerate(lowercase_ ):
        A__ : Any = weights_name.replace(
            """.bin""" , f"""-{idx+1:05d}-of-{len(lowercase_ ):05d}.bin""" )  # len(sharded_state_dicts):05d}
        A__ : Dict = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
        A__ : str = shard
        for key in shard:
            A__ : Any = shard_file
    # Add the metadata
    A__ : Tuple = {"""total_size""": total_size}
    A__ : Union[str, Any] = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(lowercase_ , lowercase_ ) , """w""" , encoding="""utf-8""" ) as f:
        A__ : Dict = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + """\n"""
        f.write(lowercase_ )
    return metadata, index
if __name__ == "__main__":
    # CLI entry point for the Switch Transformers T5X -> PyTorch conversion.
    A_ : Optional[int] = argparse.ArgumentParser()
    # Restore the name the add_argument / parse_args calls below expect; the previous
    # revision bound the parser only to `A_` and then read an undefined `parser`.
    parser = A_
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    A_ : Dict = parser.parse_args()
    args = A_  # same restoration for the parsed namespace
    # NOTE(review): `shard_on_the_fly` is not bound under that name in this file as
    # written (the sharding function above was renamed) -- confirm the intended
    # binding upstream.
    shard_on_the_fly(
        # Bug fix: argparse stores the option declared above as
        # `switch_t5x_checkpoint_path`; the previous revision read
        # `args.switch_tax_checkpoint_path`, which raises AttributeError.
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def UpperCamelCase () -> int:
    # Smoke-test helper: round-trip a converted Switch Transformers checkpoint by
    # generating text for a fixed fill-in-the-blank prompt and printing the result.
    # NOTE(review): `config`, `model`, `tokenizer`, `out` and `lowercase_` are read
    # but never bound in this scope (the objects are assigned to the mangled `A__`),
    # and `TaTokenizer` looks like a mangled `T5Tokenizer`. Confirm upstream.
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
    A__ : str = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
    A__ : str = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
    A__ : Tuple = TaTokenizer.from_pretrained("""t5-small""" )
    A__ : Dict = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    A__ : Union[str, Any] = tokenizer(lowercase_ , return_tensors="""pt""" ).input_ids
    A__ : Tuple = model.generate(lowercase_ , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 64 | 1 |
def UpperCamelCase (a: str , b: str) -> bool:
    """Return True if ``a`` can be turned into ``b`` by upper-casing some of its
    lowercase letters and deleting all remaining lowercase letters.

    Classic "abbreviation" dynamic programme: ``dp[i][j]`` is True when the first
    ``i`` characters of ``a`` can produce the first ``j`` characters of ``b``.

    Fixes vs. the previous revision: the signature declared the same parameter name
    twice (a SyntaxError) while the body read ``a``/``b``, and every dp-table update
    had been collapsed into a dead local assignment, so the table was never filled.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True  # the empty prefix of `a` yields the empty prefix of `b`
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # consume a[i] as the (upper-cased) match for b[j]
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # a lowercase character may instead simply be deleted
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import bootstrap for the BARTpho tokenizer package: the tokenizer is only
# registered when sentencepiece is installed.
# NOTE(review): `A_` is reused for the import structure, the tokenizer list and the
# lazy module, yet the _LazyModule call below still reads `_import_structure`,
# which is never bound here -- names appear machine-mangled. Verify against the
# original transformers __init__.py for bartpho.
A_ : Optional[Any] = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: simply skip registering the tokenizer.
    pass
else:
    A_ : str = ['BartphoTokenizer']
if TYPE_CHECKING:
    # Static type checkers import the real symbol (same sentencepiece gate).
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    # At runtime, replace this module with a lazy proxy that imports on access.
    import sys
    A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
    '''Minimal Beam-based builder fixture: one "train" split whose examples each
    carry a single string ``content`` field.'''
    # NOTE(review): all three methods are named `__A` and two declare `A__` twice
    # (a SyntaxError); `supervised_keys=A__`, `pipeline` and `beam.Create(A__ )`
    # read names those signatures never bind -- identifiers appear machine-mangled.
    # Compare with datasets' original DummyBeamDataset test fixture.
    def __A ( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )
    def __A ( self , A__ , A__ ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
    def __A ( self , A__ , A__ ):
        # Beam is imported lazily so the module is importable without apache_beam.
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
    '''Beam-based builder fixture whose examples use a nested feature: a sequence
    field ``a.b`` of strings.'''
    # NOTE(review): same machine-mangled identifier pattern as the builder above
    # (`__A` methods, duplicated `A__` parameters, unbound `pipeline`/`A__` reads).
    def __A ( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )
    def __A ( self , A__ , A__ ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
        ]
    def __A ( self , A__ , A__ ):
        # Beam is imported lazily so the module is importable without apache_beam.
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase () -> Dict:
    """Build the fixture payload for the flat builder: a list of
    ``(index, {"content": text})`` pairs over the three dummy strings."""
    payload = []
    for index, text in enumerate(["""foo""", """bar""", """foobar"""] ):
        payload.append((index, {"content": text}) )
    return payload
def UpperCamelCase () -> Tuple:
    """Build the fixture payload for the nested builder: a list of
    ``(index, {"a": {"b": [text]}})`` pairs over the three dummy strings."""
    payload = []
    for index, text in enumerate(["""foo""", """bar""", """foobar"""] ):
        payload.append((index, {"a": {"b": [text]}}) )
    return payload
class _a (__magic_name__ ):
    '''Unit tests for the Beam-based dataset builders defined above.'''
    # NOTE(review): `__magic_name__`, `DummyBeamDataset`, `NestedBeamDataset`,
    # `builder`, `dset` and `expected_num_examples` are never bound under those
    # names in this file as written (the builder classes above are both literally
    # `_a`, and locals are assigned to the mangled `A__`). Compare with datasets'
    # original test_beam.py before relying on details documented here.
    @require_beam
    def __A ( self ):
        # End-to-end DirectRunner build: check arrow file, features, rows, info json.
        A__ : Dict = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : int = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
    @require_beam
    def __A ( self ):
        # Same build, but patch WriteToParquet to force two shards per split.
        import apache_beam as beam
        A__ : int = beam.io.parquetio.WriteToParquet
        A__ : List[str] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
                A__ : Optional[Any] = partial(A__ , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
    @require_beam
    def __A ( self ):
        # Building without a beam runner must raise MissingBeamOptions.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    @require_beam
    def __A ( self ):
        # Same end-to-end check for the builder with a nested Sequence feature.
        A__ : List[Any] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
| 64 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Official OpenAI Whisper checkpoint URLs keyed by model name. The second-to-last
# URL path component is a hex digest used for integrity checking by the download
# helper in this file.
A_ : Dict = {
    'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
    'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
    'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
    'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
    'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
    'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
    'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
    'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
    'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
    'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def UpperCamelCase (lowercase_: Optional[Any] ) -> Optional[int]:
    """Remove legacy top-level entries from a Whisper state dict, in place.

    Parameters:
        lowercase_: the state dict (a mapping); mutated in place, missing keys
            are ignored. Returns None.

    Fixes vs. the previous revision: the body read unbound names
    (``state_dict`` / ``ignore_keys``) and popped the dict itself as its own key.
    """
    ignore_keys = ["""layers""", """blocks"""]
    for k in ignore_keys:
        # pop with a default so absent keys do not raise KeyError
        lowercase_.pop(k , None )
# Substring replacements mapping original OpenAI Whisper weight-name fragments to
# the Transformers naming scheme; the rename helper below applies each pair with
# str-replace over every state-dict key.
A_ : Any = {
    'blocks': 'layers',
    'mlp.0': 'fc1',
    'mlp.2': 'fc2',
    'mlp_ln': 'final_layer_norm',
    '.attn.query': '.self_attn.q_proj',
    '.attn.key': '.self_attn.k_proj',
    '.attn.value': '.self_attn.v_proj',
    '.attn_ln': '.self_attn_layer_norm',
    '.attn.out': '.self_attn.out_proj',
    '.cross_attn.query': '.encoder_attn.q_proj',
    '.cross_attn.key': '.encoder_attn.k_proj',
    '.cross_attn.value': '.encoder_attn.v_proj',
    '.cross_attn_ln': '.encoder_attn_layer_norm',
    '.cross_attn.out': '.encoder_attn.out_proj',
    'decoder.ln.': 'decoder.layer_norm.',
    'encoder.ln.': 'encoder.layer_norm.',
    'token_embedding': 'embed_tokens',
    'encoder.positional_embedding': 'encoder.embed_positions.weight',
    'decoder.positional_embedding': 'decoder.embed_positions.weight',
    'ln_post': 'layer_norm',
}
def UpperCamelCase (lowercase_: str ) -> Any:
    # NOTE(review): the body reads `s_dict`, `keys`, `new_key` and `WHISPER_MAPPING`,
    # none of which are bound in this scope (the mapping above is assigned to the
    # mangled `A_`) -- identifiers appear machine-renamed. Confirm upstream.
    # Purpose: rewrite every key of a Whisper state dict using the substring table,
    # log each `old -> new` mapping, and return the mutated dict.
    A__ : Dict = list(s_dict.keys() )
    for key in keys:
        A__ : List[str] = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                A__ : List[Any] = new_key.replace(lowercase_ , lowercase_ )
        print(f"""{key} -> {new_key}""" )
        A__ : Tuple = s_dict.pop(lowercase_ )
    return s_dict
def UpperCamelCase (lowercase_: Tuple ) -> Optional[int]:
    """Create a bias-free ``nn.Linear`` whose weight tensor is the given embedding's
    weight data (the standard weight-tying trick for the LM head).

    Parameters:
        lowercase_: an ``nn.Embedding``-like module with a 2-D ``weight``.

    Fixes vs. the previous revision: the body read unbound names
    (``emb`` / ``lin_layer``) and passed the input itself as the ``bias`` argument.
    """
    vocab_size, emb_size = lowercase_.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Rebind .data so the linear layer shares the embedding's weight tensor.
    lin_layer.weight.data = lowercase_.weight.data
    return lin_layer
def UpperCamelCase (url: str , root: str ) -> bytes:
    """Download an official Whisper checkpoint into ``root`` and return its bytes.

    The expected SHA-256 digest is taken from the URL (second-to-last path
    component). An existing checksum-valid file is reused; a stale one is
    re-downloaded with a tqdm progress bar.

    Raises:
        RuntimeError: if the target path exists but is not a regular file, or if
            the downloaded data fails checksum verification.

    Fixes vs. the previous revision: duplicated parameter names (a SyntaxError),
    the unbound ``expected_shaaaa`` local, the non-existent ``hashlib.shaaaa``
    function, a non-boolean ``unit_scale`` argument, and a "does not not match"
    typo in the error message.
    """
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    # SHA-256 hex digest is embedded in the checkpoint URL path.
    expected_sha256 = url.split("""/""" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , """rb""" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(url ) as source, open(download_target , """wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , """rb""" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.""" )
    return model_bytes
def UpperCamelCase (lowercase_: List[Any] , lowercase_: Tuple ) -> Optional[Any]:
    # NOTE(review): duplicate `lowercase_` parameters (a SyntaxError) and the body
    # reads `checkpoint_path`, `original_checkpoint`, `state_dict`, `dimensions`,
    # `tie_embeds`, `missing`, `model`, `proj_out_weights` -- names the signature
    # never binds; mangled form of convert_openai_whisper_to_tfms(checkpoint_path,
    # pytorch_dump_folder_path). Confirm upstream.
    # Purpose: load an OpenAI Whisper checkpoint (downloading by model name when the
    # path is not a .pt file), rename/trim its weights, build a WhisperConfig from
    # the stored dims, load into WhisperForConditionalGeneration and save it.
    if ".pt" not in checkpoint_path:
        A__ : Tuple = _download(_MODELS[checkpoint_path] )
    else:
        A__ : Optional[int] = torch.load(lowercase_ , map_location="""cpu""" )
    A__ : str = original_checkpoint["""dims"""]
    A__ : List[Any] = original_checkpoint["""model_state_dict"""]
    A__ : Optional[Any] = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(lowercase_ )
    rename_keys(lowercase_ )
    A__ : List[str] = True
    A__ : Optional[Any] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    A__ : List[Any] = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=lowercase_ , decoder_ffn_dim=lowercase_ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
    A__ : Optional[Any] = WhisperForConditionalGeneration(lowercase_ )
    A__ , A__ : List[Any] = model.model.load_state_dict(lowercase_ , strict=lowercase_ )
    # Only positional-embedding weights may legitimately be missing after renaming.
    if len(lowercase_ ) > 0 and not set(lowercase_ ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        A__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        A__ : str = proj_out_weights
    model.save_pretrained(lowercase_ )
if __name__ == "__main__":
    # CLI entry point for the OpenAI Whisper -> Transformers conversion.
    # NOTE(review): `parser`, `args` and `convert_openai_whisper_to_tfms` are never
    # bound under those names as written (parser/args are assigned to the mangled
    # `A_`), and the --checkpoint_path help string has a "Patht" typo. Verify
    # against the original conversion script.
    A_ : Any = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    A_ : Tuple = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 64 | 1 |
from __future__ import annotations
from typing import Any
class _a :
    '''Dense row-major matrix supporting elementwise +/-, scalar and matrix
    multiplication, transpose, and a Sherman-Morrison rank-one inverse update.

    NOTE(review): several methods declare `A__` twice (a SyntaxError) and bodies
    read names the signatures never bind (`row`, `column`, `another`, `result`,
    `loc`, `value`, `u`, `v`, ...) -- identifiers appear machine-renamed. Verify
    against the original sherman_morrison.py before trusting details below.
    '''
    def __init__( self , A__ , A__ , A__ = 0 ):
        # (row, column) dimensions plus a fill value for the backing 2-D list.
        A__ , A__ : Any = row, column
        A__ : Tuple = [[default_value for c in range(A__ )] for r in range(A__ )]
    def __str__( self ):
        # Render all elements right-aligned to the widest element's width.
        A__ : List[Any] = F"""Matrix consist of {self.row} rows and {self.column} columns\n"""
        # Make string identifier
        A__ : Dict = 0
        for row_vector in self.array:
            for obj in row_vector:
                A__ : List[str] = max(A__ , len(str(A__ ) ) )
        A__ : Optional[int] = F"""%{max_element_length}s"""
        # Make string and return
        def single_line(A__ ) -> str:
            nonlocal string_format_identifier
            A__ : Optional[int] = """["""
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(A__ ) for row_vector in self.array )
        return s
    def __repr__( self ):
        return str(self )
    def __A ( self , A__ ):
        # validate_indicies: True iff the location is a 2-item (row, col) within bounds.
        if not (isinstance(A__ , (list, tuple) ) and len(A__ ) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True
    def __getitem__( self , A__ ):
        assert self.validate_indicies(A__ )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self , A__ , A__ ):
        assert self.validate_indicies(A__ )
        A__ : Any = value
    def __add__( self , A__ ):
        # Elementwise addition of two same-shaped matrices.
        assert isinstance(A__ , A__ )
        assert self.row == another.row and self.column == another.column
        # Add
        A__ : List[str] = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                A__ : List[Any] = self[r, c] + another[r, c]
        return result
    def __neg__( self ):
        A__ : List[Any] = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                A__ : str = -self[r, c]
        return result
    def __sub__( self , A__ ):
        return self + (-another)
    def __mul__( self , A__ ):
        if isinstance(A__ , (int, float) ): # Scalar multiplication
            A__ : Optional[Any] = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    A__ : str = self[r, c] * another
            return result
        elif isinstance(A__ , A__ ): # Matrix multiplication
            assert self.column == another.row
            A__ : Optional[int] = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            A__ : str = F"""Unsupported type given for another ({type(A__ )})"""
            raise TypeError(A__ )
    def __A ( self ):
        # transpose: new matrix with rows and columns swapped.
        A__ : List[Any] = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                A__ : Optional[Any] = self[r, c]
        return result
    def __A ( self , A__ , A__ ):
        # sherman_morrison(u, v): apply the Sherman-Morrison formula for
        # (A + u v^T)^-1 given column vectors u, v; None when the denominator is 0.
        assert isinstance(A__ , A__ ) and isinstance(A__ , A__ )
        assert self.row == self.column == u.row == v.row # u, v should be column vector
        assert u.column == v.column == 1 # u, v should be column vector
        # Calculate
        A__ : Tuple = v.transpose()
        A__ : List[Any] = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def UpperCamelCase () -> None:
        # NOTE(review): both nested test functions are named `UpperCamelCase` (the
        # second shadows the first), the final call targets an undefined `testa`,
        # and the body reads `ainv`, `u`, `v`, `lowercase_` that are never bound --
        # identifiers appear machine-renamed from the original test1()/test2().
        # a^(-1)
        A__ : List[Any] = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            A__ : List[str] = 1
        print(f"""a^(-1) is {ainv}""" )
        # u, v
        A__ : Dict = Matrix(3 , 1 , 0 )
        A__ , A__ , A__ : Optional[int] = 1, 2, -3
        A__ : Optional[int] = Matrix(3 , 1 , 0 )
        A__ , A__ , A__ : Union[str, Any] = 4, -2, 5
        print(f"""u is {u}""" )
        print(f"""v is {v}""" )
        print(f"""uv^T is {u * v.transpose()}""" )
        # Sherman Morrison
        print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase_ , lowercase_ )}""" )
    def UpperCamelCase () -> None:
        # Run the module's doctests.
        import doctest
        doctest.testmod()
    testa()
| 64 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch ops deterministic so the pixel-slice comparisons below are stable.
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
    '''Fast tests for TextToVideoSDPipeline built from tiny random components.

    NOTE(review): `__magic_name__` is never defined in this file as written, test
    methods are all named `__A`, and locals (`unet`, `scheduler`, `vae`,
    `sd_pipe`, `frames`, ...) are read but bound only to the mangled `A__` name.
    Compare with diffusers' original test file before trusting details below.
    '''
    UpperCAmelCase__: Any = TextToVideoSDPipeline
    UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
    UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    UpperCAmelCase__: Optional[int] = frozenset(
        [
            '''num_inference_steps''',
            '''generator''',
            '''latents''',
            '''return_dict''',
            '''callback''',
            '''callback_steps''',
        ] )
    def __A ( self ):
        # get_dummy_components: tiny seeded UNet3D / DDIM / VAE / CLIP components.
        torch.manual_seed(0 )
        A__ : Optional[int] = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
        A__ : Optional[int] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        A__ : Union[str, Any] = CLIPTextModel(A__ )
        A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        A__ : Dict = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components
    def __A ( self , A__ , A__=0 ):
        # get_dummy_inputs: fixed prompt plus a device-appropriate seeded generator.
        if str(A__ ).startswith("""mps""" ):
            A__ : Tuple = torch.manual_seed(A__ )
        else:
            A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
        A__ : List[str] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
        return inputs
    def __A ( self ):
        # Run a 2-step generation on CPU and compare a corner pixel slice.
        A__ : List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        A__ : Union[str, Any] = self.get_dummy_components()
        A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
        A__ : int = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        A__ : int = self.get_dummy_inputs(A__ )
        A__ : int = """np"""
        A__ : Any = sd_pipe(**A__ ).frames
        A__ : Dict = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __A ( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __A ( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        pass
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        pass
    @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def __A ( self ):
        pass
    def __A ( self ):
        return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
    '''Slow integration tests: run the real damo-vilab text-to-video checkpoint on
    CUDA and compare generated frames against reference .npy videos.

    NOTE(review): locals (`pipe`, `prompt`, `generator`, `video_frames`, `video`,
    `expected_video`) are read but bound only to the mangled `A__` name.
    Compare with diffusers' original test file.
    '''
    def __A ( self ):
        # 25-step generation with the DPM-Solver multistep scheduler.
        A__ : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
        A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        A__ : int = pipe.to("""cuda""" )
        A__ : Optional[Any] = """Spiderman is surfing"""
        A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
        A__ : Dict = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
    def __A ( self ):
        # Cheap 2-step generation with the default scheduler.
        A__ : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
        A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : List[str] = pipe.to("""cuda""" )
        A__ : Dict = """Spiderman is surfing"""
        A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
        A__ : Optional[int] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 64 | 1 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys

# Environment-dump script: prints interpreter, OS, Torch/CUDA and transformers
# version information for bug reports.
A_ : int = '3'  # NOTE(review): looks like a mangled environment-variable assignment -- confirm upstream

print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())

try:
    import torch
except ImportError:
    # Torch absent: report only that fact.
    print('Torch version:', None)
else:
    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())

try:
    import transformers
except ImportError:
    print('transformers version:', None)
else:
    print('transformers version:', transformers.__version__)
| 64 |
def UpperCamelCase (lowercase_: int ) -> int:
    """Return the 1-based position of the highest set bit of ``lowercase_``
    (i.e. its bit length); 0 for an input of 0.

    Raises:
        TypeError: if the input is not an ``int``.

    Fixes vs. the previous revision: ``isinstance`` was called with the value as
    its own type argument, and the loop read unbound names
    (``number`` / ``position``).
    """
    if not isinstance(lowercase_ , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    position = 0
    number = lowercase_  # shift a local copy down to zero
    # NOTE(review): a negative input never reaches 0 under >>= 1 and would loop
    # forever; presumably callers pass non-negative values -- confirm.
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
# Shared RNG so dummy feature values are reproducible across test helpers.
A_ : Union[str, Any] = random.Random()
def UpperCamelCase (lowercase_: str , lowercase_: Tuple=1.0 , lowercase_: Optional[Any]=None , lowercase_: List[Any]=None ) -> Optional[Any]:
    # NOTE(review): `lowercase_` is declared four times (a SyntaxError) and the body
    # reads `rng`, `global_rng`, `shape`, `scale`, `values` that are never bound
    # here -- mangled form of floats_list(shape, scale, rng, name). Confirm upstream.
    # Purpose: build a shape[0] x shape[1] nested list of random floats scaled by
    # `scale`, using the shared module RNG when none is given.
    if rng is None:
        A__ : Union[str, Any] = global_rng
    A__ : Any = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class _a (unittest.TestCase ):
    '''Config holder for WhisperFeatureExtractor tests: stores feature-extraction
    hyper-parameters and builds dummy speech inputs.

    NOTE(review): `__init__` declares `A__` many times (a SyntaxError) while its
    body reads the original parameter names (parent, batch_size, min_seq_length,
    ...), and the other methods read `equal_length` / `numpify` /
    `speech_inputs` that their signatures never bind -- identifiers appear
    machine-renamed. Compare with the original transformers test file.
    '''
    def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=10 , A__=160 , A__=8 , A__=0.0 , A__=4000 , A__=False , A__=True , ):
        A__ : Union[str, Any] = parent
        A__ : Dict = batch_size
        A__ : int = min_seq_length
        A__ : Optional[Any] = max_seq_length
        # Step between successive example lengths so the batch spans min..max.
        A__ : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        A__ : List[str] = padding_value
        A__ : List[str] = sampling_rate
        A__ : Optional[int] = return_attention_mask
        A__ : Any = do_normalize
        A__ : Dict = feature_size
        A__ : Optional[Any] = chunk_length
        A__ : Any = hop_length
    def __A ( self ):
        # Keyword arguments for constructing a WhisperFeatureExtractor.
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def __A ( self , A__=False , A__=False ):
        # Build a batch of per-example float sequences: equal-length or strictly
        # increasing in length; optionally converted to numpy arrays.
        def _flatten(A__ ):
            return list(itertools.chain(*A__ ) )
        if equal_length:
            A__ : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            A__ : Optional[Any] = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            A__ : Tuple = [np.asarray(A__ ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
    """Test suite for ``WhisperFeatureExtractor`` (obfuscated name; upstream
    ``WhisperFeatureExtractionTest``).

    NOTE(review): the first base class was obfuscated to the unresolved
    placeholder ``__magic_name__`` (upstream: the sequence feature-extraction
    mixin imported at the top of this module). Every test method was renamed
    ``__A``, so each definition shadows the previous one. Assignment targets
    were collapsed to ``A__`` while later lines read the original local names
    (``feat_extract_first``, ``input_features`` ...), and dtypes such as
    ``np.floataa``/``torch.floataa`` are obfuscation residue (presumably
    float32) -- the bodies are unresolvable as written.
    """
    UpperCAmelCase__: List[str] = WhisperFeatureExtractor if is_speech_available() else None
    def __A ( self ):
        # setUp: build the fixture helper defined above.
        A__ : Dict = WhisperFeatureExtractionTester(self )
    def __A ( self ):
        # Round-trip save_pretrained/from_pretrained and compare dicts/filters.
        A__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            A__ : Union[str, Any] = feat_extract_first.save_pretrained(A__ )[0]
            check_json_file_has_correct_format(A__ )
            A__ : Union[str, Any] = self.feature_extraction_class.from_pretrained(A__ )
            A__ : str = feat_extract_first.to_dict()
            A__ : Union[str, Any] = feat_extract_second.to_dict()
            A__ : Optional[int] = feat_extract_first.mel_filters
            A__ : List[Any] = feat_extract_second.mel_filters
            self.assertTrue(np.allclose(A__ , A__ ) )
            self.assertEqual(A__ , A__ )
    def __A ( self ):
        # Round-trip to_json_file/from_json_file and compare dicts/filters.
        A__ : int = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            A__ : Union[str, Any] = os.path.join(A__ , """feat_extract.json""" )
            feat_extract_first.to_json_file(A__ )
            A__ : str = self.feature_extraction_class.from_json_file(A__ )
            A__ : Dict = feat_extract_first.to_dict()
            A__ : Tuple = feat_extract_second.to_dict()
            A__ : Union[str, Any] = feat_extract_first.mel_filters
            A__ : int = feat_extract_second.mel_filters
            self.assertTrue(np.allclose(A__ , A__ ) )
            self.assertEqual(A__ , A__ )
    def __A ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        A__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        A__ : int = [np.asarray(A__ ) for speech_input in speech_inputs]
        # Test feature size
        A__ : Union[str, Any] = feature_extractor(A__ , padding="""max_length""" , return_tensors="""np""" ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        A__ : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
        A__ : Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
        self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test batched
        A__ : Any = feature_extractor(A__ , return_tensors="""np""" ).input_features
        A__ : Optional[Any] = feature_extractor(A__ , return_tensors="""np""" ).input_features
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        A__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        A__ : List[Any] = np.asarray(A__ )
        A__ : List[str] = feature_extractor(A__ , return_tensors="""np""" ).input_features
        A__ : str = feature_extractor(A__ , return_tensors="""np""" ).input_features
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test truncation required
        A__ : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        A__ : Tuple = [np.asarray(A__ ) for speech_input in speech_inputs]
        A__ : List[str] = [x[: feature_extractor.n_samples] for x in speech_inputs]
        A__ : List[str] = [np.asarray(A__ ) for speech_input in speech_inputs_truncated]
        A__ : Any = feature_extractor(A__ , return_tensors="""np""" ).input_features
        A__ : str = feature_extractor(A__ , return_tensors="""np""" ).input_features
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
    def __A ( self ):
        # Padding returns the requested tensor framework with a float dtype.
        import torch
        A__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
        A__ : List[Any] = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            A__ : str = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            A__ : Optional[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def __A ( self , A__ ):
        # Load the first ``num_samples`` audio arrays from the dummy LibriSpeech
        # dataset (upstream name: ``_load_datasamples``; called below via
        # ``self._load_datasamples``, which is unresolved after obfuscation).
        A__ : Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        A__ : Optional[int] = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    def __A ( self ):
        # Integration test: compare extracted features against golden values.
        # fmt: off
        A__ : Optional[int] = torch.tensor(
            [
                0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
                0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
                0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
                -0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
            ] )
        # fmt: on
        A__ : Any = self._load_datasamples(1 )
        A__ : Optional[Any] = WhisperFeatureExtractor()
        A__ : Union[str, Any] = feature_extractor(A__ , return_tensors="""pt""" ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , A__ , atol=1e-4 ) )
    def __A ( self ):
        # Zero-mean/unit-variance normalisation of a rescaled audio signal.
        A__ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Optional[Any] = self._load_datasamples(1 )[0]
        A__ : int = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535  # Rescale to [0, 65535] to show issue
        A__ : Optional[Any] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A__ )[0]
        self.assertTrue(np.all(np.mean(A__ ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(A__ ) - 1 ) < 1e-3 ) )
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase(img, ptsa, ptsb, rows, cols):
    """Warp ``img`` with the affine transform mapping ``ptsa`` onto ``ptsb``.

    Fixed from the original, whose five parameters were all named ``lowercase_``
    (a duplicate-argument SyntaxError) while the body referenced the undefined
    names ``rows`` and ``cols``.

    :param img: input image as an ``np.ndarray``.
    :param ptsa: three source points, float32, shape (3, 2).
    :param ptsb: the three corresponding destination points.
    :param rows: output height in pixels.
    :param cols: output width in pixels.
    :return: the warped image of size ``(rows, cols)``.
    """
    # ``cva`` is OpenCV (cv2), imported at the top of this script under an
    # obfuscated alias.
    matrix = cva.getAffineTransform(ptsa, ptsb)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    A_ : List[Any] = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    # NOTE(review): obfuscation collapsed the distinct upstream names (image,
    # gray_img, img_rows/img_cols, pts1..pts4, images, fig, titles) onto
    # ``A_``/``ptsa``, so each assignment below overwrites the previous one and
    # the reads of ``image``, ``gray_img``, ``img_rows``/``img_cols`` and
    # ``images``/``titles`` are unresolved as written. ``np.floataa`` is
    # residue, presumably ``np.float32``.
    A_ : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    A_ , A_ : Optional[Any] = gray_img.shape
    # set different points to rotate image
    A_ : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
    A_ : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
    A_ : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
    A_ : Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
    # add all rotated images in a list
    A_ : Dict = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    A_ : Union[str, Any] = plt.figure(1)
    A_ : Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 64 | 1 |
import argparse
import struct
import unittest
class _a :
    """Pure-Python SHA-256 implementation (obfuscated name; upstream ``SHAaaa``).

    Construction hashes ``data`` immediately: the padding step prepares the
    message, the compression step runs the 64 rounds per 512-bit block, and
    the hex digest ends up in ``self.hash`` upstream.

    NOTE(review): obfuscation collapsed every method name to ``__A`` (so later
    definitions shadow earlier ones) and every assignment target to ``A__``,
    while subsequent references keep the original identifiers
    (``self.preprocessing``, ``self.final_hash``, ``self.hashes``,
    ``self.round_constants``, ``self.ror``, ``words``, ``sa``/``tempa`` ...).
    The class is unresolvable as written; comments describe the intended
    upstream behaviour.
    """
    def __init__( self , A__ ):
        # Message to hash (bytes); upstream stores it as ``self.data``.
        A__ : Optional[int] = data
        # Initialize hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes (upstream ``self.hashes``).
        A__ : Any = [
            0x6A_09E_667,
            0xBB_67A_E85,
            0x3C_6EF_372,
            0xA5_4FF_53A,
            0x51_0E5_27F,
            0x9B_056_88C,
            0x1F_83D_9AB,
            0x5B_E0C_D19,
        ]
        # Initialize round constants: fractional parts of the cube roots of the
        # first 64 primes (upstream ``self.round_constants``).
        A__ : Optional[int] = [
            0x42_8A2_F98,
            0x71_374_491,
            0xB5_C0F_BCF,
            0xE9_B5D_BA5,
            0x39_56C_25B,
            0x59_F11_1F1,
            0x92_3F8_2A4,
            0xAB_1C5_ED5,
            0xD8_07A_A98,
            0x12_835_B01,
            0x24_318_5BE,
            0x55_0C7_DC3,
            0x72_BE5_D74,
            0x80_DEB_1FE,
            0x9B_DC0_6A7,
            0xC1_9BF_174,
            0xE4_9B6_9C1,
            0xEF_BE4_786,
            0x0F_C19_DC6,
            0x24_0CA_1CC,
            0x2D_E92_C6F,
            0x4A_748_4AA,
            0x5C_B0A_9DC,
            0x76_F98_8DA,
            0x98_3E5_152,
            0xA8_31C_66D,
            0xB0_032_7C8,
            0xBF_597_FC7,
            0xC6_E00_BF3,
            0xD5_A79_147,
            0x06_CA6_351,
            0x14_292_967,
            0x27_B70_A85,
            0x2E_1B2_138,
            0x4D_2C6_DFC,
            0x53_380_D13,
            0x65_0A7_354,
            0x76_6A0_ABB,
            0x81_C2C_92E,
            0x92_722_C85,
            0xA2_BFE_8A1,
            0xA8_1A6_64B,
            0xC2_4B8_B70,
            0xC7_6C5_1A3,
            0xD1_92E_819,
            0xD6_990_624,
            0xF4_0E3_585,
            0x10_6AA_070,
            0x19_A4C_116,
            0x1E_376_C08,
            0x27_487_74C,
            0x34_B0B_CB5,
            0x39_1C0_CB3,
            0x4E_D8A_A4A,
            0x5B_9CC_A4F,
            0x68_2E6_FF3,
            0x74_8F8_2EE,
            0x78_A56_36F,
            0x84_C87_814,
            0x8C_C70_208,
            0x90_BEF_FFA,
            0xA4_506_CEB,
            0xBE_F9A_3F7,
            0xC6_717_8F2,
        ]
        A__ : List[Any] = self.preprocessing(self.data )
        self.final_hash()
    @staticmethod
    def __A ( A__ ):
        # Pad the message: append 0x80, zero-fill so the length is 56 mod 64,
        # then append the original bit length as a big-endian 64-bit integer
        # (upstream ``preprocessing``).
        A__ : Optional[Any] = b"""\x80""" + (b"""\x00""" * (63 - (len(A__ ) + 8) % 64))
        A__ : List[str] = struct.pack(""">Q""" , (len(A__ ) * 8) )
        return data + padding + big_endian_integer
    def __A ( self ):
        # Main compression loop (upstream ``final_hash``).
        # Convert into blocks of 64 bytes
        A__ : str = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            A__ : Optional[int] = list(struct.unpack(""">16L""" , A__ ) )
            # add 48 0-ed integers (message-schedule slots filled below)
            words += [0] * 48
            A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ : Any = self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    # (SHA-256 message schedule: sigma0/sigma1 expansion)
                    A__ : Optional[Any] = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    A__ : List[str] = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    A__ : List[str] = (
                        words[index - 16] + sa + words[index - 7] + sa
                    ) % 0x100_000_000
                # Compression (Sigma1, Ch, temp1, Sigma0, Maj, temp2), all mod 2**32
                A__ : Optional[int] = self.ror(A__ , 6 ) ^ self.ror(A__ , 11 ) ^ self.ror(A__ , 25 )
                A__ : List[Any] = (e & f) ^ ((~e & 0xFF_FFF_FFF) & g)
                A__ : List[Any] = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x100_000_000
                A__ : str = self.ror(A__ , 2 ) ^ self.ror(A__ , 13 ) ^ self.ror(A__ , 22 )
                A__ : str = (a & b) ^ (a & c) ^ (b & c)
                A__ : Dict = (sa + maj) % 0x100_000_000
                A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ : Optional[int] = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x100_000_000),
                    c,
                    b,
                    a,
                    ((tempa + tempa) % 0x100_000_000),
                )
            A__ : Any = [a, b, c, d, e, f, g, h]
            # Modify final values (add the compressed chunk into the state)
            A__ : int = [
                ((element + mutated_hash_values[index]) % 0x100_000_000)
                for index, element in enumerate(self.hashes )
            ]
        # Hex digest: eight zero-padded 32-bit words (upstream ``self.hash``).
        A__ : List[Any] = """""".join([hex(A__ )[2:].zfill(8 ) for value in self.hashes] )
    def __A ( self , A__ , A__ ):
        # 32-bit right-rotate of ``value`` by ``rotations`` bits
        # (upstream ``ror``; parameters were ``value`` and ``rotations``).
        return 0xFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class _a (unittest.TestCase ):
    """Checks the SHA-256 implementation above against hashlib.

    NOTE(review): the implementation class was also renamed ``_a`` by the
    obfuscation, so the reference to ``SHAaaa`` below is unresolved, and
    ``hashlib.shaaaa`` is residue -- upstream this compares against
    ``hashlib.sha256``.
    """
    def __A ( self ):
        import hashlib
        A__ : Optional[Any] = bytes("""Test String""" , """utf-8""" )
        self.assertEqual(SHAaaa(A__ ).hash , hashlib.shaaaa(A__ ).hexdigest() )
def UpperCamelCase() -> None:
    """CLI entry point: SHA-256-hash ``--string`` or the contents of ``--file``.

    Fixed from the original, which read the undefined name ``lowercase_`` both
    when encoding the input string and when printing the digest; the input is
    now threaded through a proper local.
    """
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s", "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file" )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    # NOTE(review): ``SHAaaa`` is the upstream name of the hash class defined
    # above (renamed ``_a`` by the obfuscation), so this call is unresolved
    # as written.
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
    # Fixed: the original called the undefined name ``main()``; the CLI entry
    # point defined directly above is ``UpperCamelCase``.
    UpperCamelCase()
| 64 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self , A__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A__ : str = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A__ )
def __A ( self ):
A__ : Dict = """sshleifer/tiny-gpt2"""
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ )
A__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Dict = """sgugger/tiny-distilbert-classification"""
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Any = """sshleifer/tiny-gpt2"""
A__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Tuple = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Tuple = AutoConfig.from_pretrained(A__ )
# set architectures equal to `None`
A__ : List[Any] = None
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Any = PyTorchBenchmark(A__ )
A__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : List[str] = """sshleifer/tinier_bart"""
A__ : List[str] = AutoConfig.from_pretrained(A__ )
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : Dict = """sshleifer/tinier_bart"""
A__ : int = AutoConfig.from_pretrained(A__ )
A__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
A__ : Optional[Any] = PyTorchBenchmark(A__ )
benchmark.run()
self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A__ ):
self.assertTrue(hasattr(A__ , """sequential""" ) )
self.assertTrue(hasattr(A__ , """cumulative""" ) )
self.assertTrue(hasattr(A__ , """current""" ) )
self.assertTrue(hasattr(A__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
| 64 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def UpperCamelCase(result, args):
    """Compute WER/CER for ``result`` and write metric/prediction log files.

    Fixed from the original, whose two parameters were both named
    ``lowercase_`` while the body read ``result``/``args`` and printed/wrote
    undefined locals; the metric string and the nested writer now use proper
    names.

    :param result: mapped dataset with ``"target"`` and ``"prediction"`` columns.
    :param args: parsed CLI namespace (``dataset``, ``config``, ``split``,
        ``log_outputs``).
    """
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")
            result.map(write_to_file, with_indices=True)
def UpperCamelCase(text):
    """Lower-case ``text`` and strip training-time-ignored characters/sequences.

    Fixed from the original, whose parameter was named ``lowercase_`` while the
    body referenced ``text``.

    :param text: raw transcription string.
    :return: normalized string.
    """
    chars_to_ignore_regex = """[,?.!\-\;\:\"“%‘”�—’…–]"""  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    # NOTE(review): the last two entries render as plain spaces here; upstream
    # they are runs of multiple spaces -- confirm against the original script.
    token_sequences_to_ignore = ["\n\n", "\n", " ", " "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
def UpperCamelCase(args):
    """Run ASR evaluation for ``args.model_id`` on the requested dataset split.

    Loads the dataset, resamples audio to the model's sampling rate,
    transcribes every example with a 🤗 ``pipeline`` and forwards the result to
    the metric/logging helper. Fixed from the original, whose parameter was
    obfuscated to ``lowercase_`` while the body referenced ``args``, and whose
    per-batch prediction/target writes were collapsed into dead locals.
    """
    # load dataset
    # NOTE(review): the obfuscated source passed a placeholder as
    # ``use_auth_token``; ``True`` matches the upstream script -- confirm.
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)
    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    # NOTE(review): ``log_results`` and ``normalize_text`` are the upstream
    # helper names; in this obfuscated module both were renamed
    # ``UpperCamelCase``, so these references are unresolved as written.
    log_results(result, args)
if __name__ == "__main__":
    # Fixed: the parser and parsed namespace were assigned to the obfuscated
    # name ``A_`` while the calls below read ``parser``/``args``, and the entry
    # point ``main`` was renamed ``UpperCamelCase`` (defined above).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
    )
    parser.add_argument(
        '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
    )
    parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
    parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
    )
    parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
    )
    parser.add_argument(
        '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
    )
    parser.add_argument(
        '--device',
        type=int,
        default=None,
        help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
    )
    args = parser.parse_args()
    UpperCamelCase(args)
| 64 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE(review): the path is assigned to the obfuscated name ``A_`` but read
# below under its upstream name ``git_repo_path``, which is unresolved here.
A_ : Optional[int] = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase(config):
    """Register the custom markers used by the transformers test suite.

    Body of the ``pytest_configure`` hook. Fixed from the original, whose
    parameter was named ``lowercase_`` while the body referenced ``config``;
    the parameter is renamed to match (pytest invokes hooks by keyword, so
    the hook-spec name matters once this is wired up as a real hook).
    """
    config.addinivalue_line(
        """markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
    config.addinivalue_line(
        """markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
    config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
    config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
    config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
    config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def UpperCamelCase(parser):
    """Forward pytest option registration to the shared transformers helper.

    Body of the ``pytest_addoption`` hook; the parameter is renamed from the
    obfuscated ``lowercase_`` to ``parser`` to match the hook signature
    (pytest calls hook implementations by keyword).
    """
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def UpperCamelCase(terminalreporter):
    """Emit the transformers report files when ``--make-reports`` is set.

    Body of the ``pytest_terminal_summary`` hook. Fixed from the original,
    which read ``terminalreporter`` while its parameter was named
    ``lowercase_`` and passed the placeholder as the report ``id``; both now
    use the proper names (the option value doubles as the report id upstream).
    """
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def UpperCamelCase(session, exitstatus):
    """Map pytest exit status 5 ("no tests collected") to success.

    Body of the ``pytest_sessionfinish`` hook. Fixed from the original, which
    had two duplicate ``lowercase_`` parameters and assigned 0 to a throwaway
    local (a no-op); the upstream conftest sets ``session.exitstatus``.
    """
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
A_ : Tuple = doctest.register_optionflag('IGNORE_RESULT')
# Alias of the stdlib doctest OutputChecker (obfuscated name; upstream these
# were bound to ``IGNORE_RESULT`` and ``OutputChecker`` respectively, which
# the class below reads).
A_ : Dict = doctest.OutputChecker
class _a (__magic_name__ ):
    """doctest output checker that honours the custom IGNORE_RESULT flag
    (obfuscated name; upstream ``CustomOutputChecker``).

    NOTE(review): the base class was obfuscated to the unresolved placeholder
    ``__magic_name__`` -- upstream this derives from ``doctest.OutputChecker``
    -- and the method's three parameters were collapsed to duplicate ``A__``
    placeholders while the body reads the original names.
    """
    def __A ( self , A__ , A__ , A__ ):
        # Treat any example carrying IGNORE_RESULT as passing regardless of
        # its actual output; otherwise defer to the stock checker.
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , A__ , A__ , A__ )
# NOTE(review): upstream these three lines monkey-patch the doctest machinery
# (doctest.OutputChecker = CustomOutputChecker, and the _pytest doctest
# module/parser replacements). The obfuscation turned them into plain
# assignments to ``A_`` and left ``CustomOutputChecker`` unresolved (the
# class above was renamed ``_a``), so as written they have no effect.
A_ : str = CustomOutputChecker
A_ : Dict = HfDoctestModule
A_ : Optional[int] = HfDocTestParser
| 64 | 1 |
from __future__ import annotations
from PIL import Image
# Define glider example
A_ : Dict = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
A_ : str = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def UpperCamelCase(cells):
    """Return the next Game of Life generation for the grid ``cells``.

    Fixed from the original, whose parameter was named ``lowercase_`` while
    most of the body read ``cells`` (an undefined name).

    :param cells: list of rows of 0/1 ints; not modified. Cells outside the
        grid are treated as dead.
    :return: a new grid of the same shape with the rules applied.
    """
    height = len(cells)
    next_generation = []
    for i in range(height):
        next_row = []
        for j in range(len(cells[i])):
            # Count the live cells among the (up to) eight neighbours,
            # clamping at the grid edges.
            neighbour_count = 0
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    if di == 0 and dj == 0:
                        continue
                    ni, nj = i + di, j + dj
                    if 0 <= ni < height and 0 <= nj < len(cells[i]):
                        neighbour_count += cells[ni][nj]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            survives = alive and 2 <= neighbour_count <= 3
            born = (not alive) and neighbour_count == 3
            next_row.append(1 if survives or born else 0)
        next_generation.append(next_row)
    return next_generation
def UpperCamelCase(cells, frames):
    """Render ``frames`` successive Game of Life generations as PIL images.

    Fixed from the original, whose two parameters were both named
    ``lowercase_`` while the body read ``cells``/``images``/``img``, and whose
    pixel write was collapsed into a dead local assignment.

    :param cells: starting grid of 0/1 ints.
    :param frames: number of generations/images to produce.
    :return: list of ``PIL.Image.Image`` frames (live cells black, dead white).
    """
    images = []
    for _ in range(frames):
        # Create output image (PIL sizes are (width, height)).
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells)) )
        pixels = img.load()
        # Save cells to image: cell value 1 -> colour 0 (black), 0 -> 255 (white).
        # NOTE(review): mirroring the original loop bounds, which assume a
        # grid with equal width and height (true for the 8x8 glider demo).
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        # NOTE(review): ``new_generation`` is the upstream name of the step
        # function defined above (renamed ``UpperCamelCase`` here), so this
        # call is unresolved as written.
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    # NOTE(review): ``generate_images``/``GLIDER``/``images`` are the upstream
    # names; in this obfuscated module the renderer is named ``UpperCamelCase``,
    # the grid constants are bound to ``A_`` and the frame list is assigned to
    # ``A_`` as well, so every name read here is unresolved as written.
    A_ : Any = generate_images(GLIDER, 16)
    images[0].save('out.gif', save_all=True, append_images=images[1:])
| 64 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
    """Config/inputs builder for the TF Pegasus tests (obfuscated name;
    upstream ``TFPegasusModelTester``).

    NOTE(review): ``__init__`` declares many duplicate ``A__`` parameters (a
    SyntaxError in real Python) while the body stores the original names;
    both methods below share the name ``__A`` so the second shadows the
    first; and local assignment targets were collapsed to ``A__`` while later
    references keep the upstream names (``input_ids``, ``config`` ...).
    ``tf.inta`` is obfuscation residue, presumably ``tf.int8``.
    """
    UpperCAmelCase__: List[Any] = PegasusConfig
    UpperCAmelCase__: Optional[int] = {}
    UpperCAmelCase__: List[str] = '''gelu'''
    def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
        A__ : Dict = parent
        A__ : Dict = batch_size
        A__ : Any = seq_length
        A__ : Optional[Any] = is_training
        A__ : int = use_labels
        A__ : Any = vocab_size
        A__ : Union[str, Any] = hidden_size
        A__ : Tuple = num_hidden_layers
        A__ : Tuple = num_attention_heads
        A__ : List[Any] = intermediate_size
        A__ : Union[str, Any] = hidden_dropout_prob
        A__ : Optional[Any] = attention_probs_dropout_prob
        A__ : List[Any] = max_position_embeddings
        A__ : Any = eos_token_id
        A__ : List[Any] = pad_token_id
        A__ : List[Any] = bos_token_id
    def __A ( self ):
        # Build a PegasusConfig plus the matching model-input dict
        # (upstream ``prepare_config_and_inputs_for_common``).
        A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
        A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : Tuple = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
        return config, inputs_dict
    def __A ( self , A__ , A__ ):
        # Verify cached decoding (past_key_values) matches a full forward pass
        # on a random slice (upstream ``check_decoder_model_past_large_inputs``).
        A__ : int = TFPegasusModel(config=A__ ).get_decoder()
        A__ : List[Any] = inputs_dict["""input_ids"""]
        A__ : Any = input_ids[:1, :]
        A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
        A__ : Optional[int] = inputs_dict["""head_mask"""]
        A__ : Any = 1
        # first forward pass
        A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
        A__ , A__ : Dict = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
        A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
        A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
        A__ : Tuple = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for a Pegasus model input dict.

    Any mask left as None is derived from the token ids (padding positions are
    masked out) or set to all-ones for the head masks.

    NOTE(review): the obfuscated original named all eight parameters
    `lowercase_` (a SyntaxError); names are restored from the body's own
    references and from the call site at the tester (`prepare_pegasus_inputs_dict`).
    """
    if attention_mask is None:
        # 1 for real tokens, 0 for padding. tf.int8 restored from mangled `tf.inta`.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder position (decoder_start_token) is always attended to.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _a(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TF Pegasus (config checks + cached decoding).

    NOTE(review): the obfuscated original listed the same placeholder base class
    twice (`__magic_name__, __magic_name__`), which raises TypeError at
    class-creation time, and named every class attribute `UpperCAmelCase__` and
    every method `__A`, so only the last of each survived. Standard transformers
    names are restored here — confirm the mixin/config import names used by this
    file's (unseen) import block.
    """

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        # PegasusConfig restored from an undefined `A__` — confirm import name.
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class _a(unittest.TestCase):
    """Slow integration test: batched XSum summarization with google/pegasus-xsum.

    NOTE(review): method/attribute names were obfuscated to `__A`/`UpperCAmelCase__`
    (colliding); they are restored here from the internal call sites
    (`translate_src_text`, `_assert_generated_batch_equal_expected`).
    """

    src_text = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    expected_text = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        # padding=True / use_cache=True / skip_special_tokens=True restored from
        # undefined `A__` placeholders.
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 64 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Hyper-parameter fixture driving the LayoutLMv3 image-processor tests.

    NOTE(review): the obfuscated original named all nine ``__init__`` parameters
    ``A__`` (a SyntaxError) and the class ``_a``; names are restored from the
    attribute assignments in the body and the call site
    ``LayoutLMvaImageProcessingTester(self)``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        # Explicit `is not None` check so an (invalid) empty dict is not replaced.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _a (__magic_name__ , unittest.TestCase ):
    """Tests for LayoutLMvaImageProcessor: configuration attributes, resizing of
    PIL/numpy/torch image batches, and the Tesseract-OCR integration output."""

    # NOTE(review): obfuscation artifacts are preserved byte-for-byte in this
    # class — every method is named `__A` (later definitions shadow earlier
    # ones, so a unittest runner would only ever see the last), the base class
    # `__magic_name__` is an unresolved placeholder (presumably
    # ImageProcessingSavingTestMixin — confirm), and several `A__` locals are
    # assigned but then read back under other, undefined names
    # (e.g. `image_processing`, `encoding`, `encoded_images`). Only
    # documentation is added in this review pass.
    UpperCAmelCase__: Optional[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def __A ( self ):
        # setUp: build the shared hyper-parameter fixture.
        # NOTE(review): result is bound to a throwaway local, but later methods
        # read `self.image_processor_tester` — confirm this should be an attribute.
        A__ : Tuple = LayoutLMvaImageProcessingTester(self )
    @property
    def __A ( self ):
        # image_processor_dict: kwargs used to instantiate the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()
    def __A ( self ):
        # The processor exposes its three configuration attributes.
        A__ : str = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A__ , """do_resize""" ) )
        self.assertTrue(hasattr(A__ , """size""" ) )
        self.assertTrue(hasattr(A__ , """apply_ocr""" ) )
    def __A ( self ):
        # `from_dict` honours both the serialized size and a `size=` override.
        A__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        A__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
    def __A ( self ):
        # Intentionally empty: no extra setup/teardown needed for this suite.
        pass
    def __A ( self ):
        # PIL inputs: single image and batch are resized to the configured size,
        # and OCR words/boxes are returned alongside pixel values.
        # Initialize image_processing
        A__ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , Image.Image )
        # Test not batched input
        A__ : int = image_processing(image_inputs[0] , return_tensors="""pt""" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        self.assertIsInstance(encoding.words , A__ )
        self.assertIsInstance(encoding.boxes , A__ )
        # Test batched
        A__ : Dict = image_processing(A__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def __A ( self ):
        # numpy inputs: same shape checks as the PIL path.
        # Initialize image_processing
        A__ : int = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , np.ndarray )
        # Test not batched input
        A__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        A__ : List[Any] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def __A ( self ):
        # torch inputs: same shape checks as the PIL path.
        # Initialize image_processing
        A__ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , torch.Tensor )
        # Test not batched input
        A__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        A__ : List[Any] = image_processing(A__ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def __A ( self ):
        # Integration test against a real DocVQA fixture page: with apply_ocr=True
        # the processor returns Tesseract words/boxes that must match the pinned
        # fixture data below; with apply_ocr=False only pixel values are returned.
        # with apply_OCR = True
        A__ : Tuple = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        A__ : Optional[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
        A__ : str = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
        A__ : Tuple = image_processing(A__ , return_tensors="""pt""" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        A__ : Any = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
        """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        A__ : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 
        436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , A__ )
        self.assertListEqual(encoding.boxes , A__ )
        # with apply_OCR = False
        A__ : str = LayoutLMvaImageProcessor(apply_ocr=A__ )
        A__ : str = image_processing(A__ , return_tensors="""pt""" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 64 |
class EditDistance:
    """Levenshtein edit distance between two strings, computed two ways.

    `min_dist_top_down` memoises a recursive formulation; `min_dist_bottom_up`
    fills the classic (m+1) x (n+1) DP table. Both count insertions, deletions
    and substitutions at unit cost.

    NOTE(review): the obfuscated original named all three public methods `__A`
    (so only the last survived) and collapsed `word1`/`word2` into one
    attribute, comparing a string against itself. Names are restored from the
    `__main__` call sites (`min_dist_top_down`, `min_dist_bottom_up`,
    `EditDistance()`).
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []  # memo table; -1 (top-down) means "not computed yet"

    def __min_dist_top_down_dp(self, m, n):
        """Edit distance between word1[0..m] and word2[0..n] (inclusive indices)."""
        if m == -1:
            # word1 prefix exhausted: insert the remaining n+1 chars of word2.
            return n + 1
        if n == -1:
            # word2 prefix exhausted: delete the remaining m+1 chars of word1.
            return m + 1
        if self.dp[m][n] > -1:
            return self.dp[m][n]
        if self.word1[m] == self.word2[n]:
            self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
        else:
            insert = self.__min_dist_top_down_dp(m, n - 1)
            delete = self.__min_dist_top_down_dp(m - 1, n)
            replace = self.__min_dist_top_down_dp(m - 1, n - 1)
            self.dp[m][n] = 1 + min(insert, delete, replace)
        return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        """Return the edit distance of word1 -> word2 via memoised recursion."""
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        """Return the edit distance of word1 -> word2 via an iterative DP table."""
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    # Interactive demo: read two strings and print the distance computed by
    # both implementations. NOTE(review): the obfuscated original assigned the
    # solver and inputs to `A_` but then referenced `solver`/`Sa` (NameError);
    # coherent local names are restored here.
    solver = EditDistance()

    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()

    S1 = input('Enter the first string: ').strip()
    S2 = input('Enter the second string: ').strip()

    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | 1 |
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor

# Module-level logger using transformers' logging wrapper.
A_ : List[Any] = logging.get_logger(__name__)
class _a(CLIPImageProcessor):
    """Deprecated alias of `CLIPImageProcessor`, kept only for backward compatibility.

    Instantiating it emits a FutureWarning and forwards all arguments to
    CLIPImageProcessor. NOTE(review): the obfuscated original declared
    ``def __init__(self, *A__, **A__)`` (duplicate argument name — a
    SyntaxError) and a placeholder base class; the base is restored to the
    `CLIPImageProcessor` imported above.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,  # restored from an undefined `A__` placeholder
        )
        super().__init__(*args, **kwargs)
| 64 |
def add(first: int, second: int) -> int:
    """Return first + second using only bitwise operations (for non-negative ints).

    Classic carry-propagation: XOR adds without carry, AND finds the carry bits,
    and the carry is shifted left and re-added until it is zero.

    NOTE(review): the obfuscated original named both parameters `lowercase_`
    (a SyntaxError) and lost the carry variable, leaving `c` undefined and
    `second` never updated (an infinite loop). The name `add` is restored from
    the `__main__` call site.

    >>> add(3, 5)
    8
    >>> add(13, 0)
    13
    """
    while second != 0:
        carry = first & second  # bit positions that generate a carry
        first ^= second         # sum without carries
        second = carry << 1     # propagate the carry one position left
    return first
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # NOTE(review): the obfuscated original assigned both inputs to `A_` and
    # then referenced `first`/`second` (NameError); names are restored to match
    # the print expression below.
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f'''{add(first, second) = }''')
| 64 | 1 |
from math import isqrt, sqrt  # sqrt kept: the original imported it; isqrt avoids float compares


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (all divisors except n itself).

    Iterates only up to isqrt(n), adding each divisor together with its
    complement, and counts a perfect-square root exactly once.
    NOTE(review): name restored from the call sites in ``solution`` — the
    obfuscated def was ``UpperCamelCase`` while its body referenced ``n``.
    """
    total = 0
    for i in range(1, isqrt(n) + 1):
        if n % i == 0:
            total += i
            complement = n // i
            if complement != i:  # don't double-count the square root
                total += complement
    return total - n


def solution(limit: int = 10000) -> int:
    """Project Euler 21: sum of all amicable numbers strictly below ``limit``.

    n is amicable iff d(d(n)) == n and d(n) != n, where d is the proper-divisor sum.
    """
    return sum(
        n
        for n in range(1, limit)
        if sum_of_divisors(sum_of_divisors(n)) == n and sum_of_divisors(n) != n
    )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 64 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase (lowercase_: Matrix , lowercase_: Matrix ) -> Matrix:
A__ : int = len(lowercase_ )
A__ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowercase_ )]
A__ : int
A__ : int
A__ : int
A__ : int
A__ : int
A__ : float
for row in range(lowercase_ ):
for col in range(lowercase_ ):
A__ : List[str] = matrix[row][col]
A__ : int = vector[row][0]
A__ : Optional[int] = 0
A__ : str = 0
while row < size and col < size:
# pivoting
A__ : int = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase_ , lowercase_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
A__ , A__ : Union[str, Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase_ ):
A__ : List[Any] = augmented[rowa][col] / augmented[row][col]
A__ : Dict = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase_ ):
for row in range(lowercase_ ):
A__ : List[str] = augmented[row][col] / augmented[col][col]
for cola in range(lowercase_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase_ )
]
def UpperCamelCase (lowercase_: list[int] ) -> Callable[[int], int]:
A__ : int = len(lowercase_ )
A__ : Matrix = [[0 for _ in range(lowercase_ )] for _ in range(lowercase_ )]
A__ : Matrix = [[0] for _ in range(lowercase_ )]
A__ : Matrix
A__ : int
A__ : int
A__ : int
for x_val, y_val in enumerate(lowercase_ ):
for col in range(lowercase_ ):
A__ : Dict = (x_val + 1) ** (size - col - 1)
A__ : Any = y_val
A__ : Union[str, Any] = solve(lowercase_ , lowercase_ )
def interpolated_func(lowercase_: int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase_ ) )
return interpolated_func
def UpperCamelCase (lowercase_: int ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def UpperCamelCase (lowercase_: Callable[[int], int] = question_function , lowercase_: int = 10 ) -> int:
A__ : list[int] = [func(lowercase_ ) for x_val in range(1 , order + 1 )]
A__ : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
A__ : int = 0
A__ : Callable[[int], int]
A__ : int
for poly in polynomials:
A__ : List[str] = 1
while func(lowercase_ ) == poly(lowercase_ ):
x_val += 1
ret += poly(lowercase_ )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 64 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch ops deterministic so pipeline outputs are reproducible across runs.
enable_full_determinism()
@skip_mps
class _a(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) tests for TextToVideoSDPipeline.

    NOTE(review): the obfuscated original had a placeholder base class, class
    attributes all named `UpperCAmelCase__`, colliding `__A` method names, and a
    duplicate-parameter signature on the dummy-inputs method (a SyntaxError).
    Standard diffusers PipelineTesterMixin names are restored — confirm against
    this file's import block.
    """

    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        """Build a tiny UNet/VAE/CLIP stack with fixed seeds for determinism."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            # False/False restored from undefined `A__` placeholders.
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Standard seeded call kwargs; MPS needs a global (CPU) generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class _a(unittest.TestCase):
    """Slow end-to-end tests against the released damo-vilab text-to-video model (CUDA).

    NOTE(review): undefined `A__` locals restored to coherent names; the DPM
    scheduler result is assigned back onto `pipe.scheduler` (the obfuscated
    original dropped that assignment) — confirm against the upstream test.
    """

    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
| 64 |
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return num! (factorial of a non-negative integer), memoised with lru_cache.

    Raises ValueError for negative input.
    NOTE(review): the obfuscated def was named `UpperCamelCase` with a
    parameter `lowercase_`, while the body referenced `num` and recursed via
    `factorial` — the name is restored from that recursive call site.

    >>> factorial(5)
    120
    >>> factorial(0)
    1
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Convert a txt2img UnCLIP (Karlo) checkpoint into an image-variation
    # pipeline by pairing its decoder/super-res stack with a CLIP image encoder.
    # NOTE(review): the obfuscated original assigned everything to `A_` and
    # then referenced `parser`/`args`/`txtaimg`/`imgaimg` (NameErrors);
    # coherent names are restored from those references.
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 64 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based dummy builder with a flat string `content` feature.

    NOTE(review): class name restored from the instantiation sites
    (`DummyBeamDataset(cache_dir=...)`); the obfuscated original also declared
    `def __A(self, A__, A__)` (duplicate argument name — a SyntaxError), so the
    standard BeamBasedBuilder hook names are restored.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        # Imported lazily so the module loads even without apache_beam installed.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based dummy builder with a nested `{"a": {"b": [str]}}` feature.

    NOTE(review): name/hook signatures restored per the `get_test_nested_examples`
    call and standard BeamBasedBuilder API (the obfuscated original had
    duplicate `A__` parameter names — a SyntaxError).
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        # Imported lazily so the module loads even without apache_beam installed.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples() -> list:
    """Return (key, example) pairs matching DummyBeamDataset's flat schema.

    Renamed from `UpperCamelCase`: the builders and tests in this file call
    `get_test_dummy_examples`, and the duplicate `UpperCamelCase` definitions
    shadowed each other. Return annotation fixed (`Dict` was wrong: a list of
    tuples is returned).
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples() -> list:
    """Return (key, example) pairs matching NestedBeamDataset's nested schema.

    Renamed from `UpperCamelCase`: the builders and tests in this file call
    `get_test_nested_examples`. Return annotation fixed (`Tuple` was wrong: a
    list of tuples is returned).
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    """End-to-end tests for Beam-based dataset builders (DirectRunner).

    Restored from the obfuscated `_a(__magic_name__)`: `TestCase` is imported
    at the top of this file, every method had been renamed `__A` (so only the
    last one would be collected), and locals such as `expected_num_examples`,
    `builder` and `dset` were assigned to throwaway `A__` names while still
    being referenced by their real names.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two shards so the sharded file-naming path is exercised.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # Fix: the second assertion previously re-checked shard 00000; the
            # intent is clearly to check that BOTH shards exist.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Without a beam runner/options the builder must refuse to run.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 64 | 1 |
from __future__ import annotations
from math import pow, sqrt
def UpperCamelCase(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve the electrical impedance triangle Z^2 = R^2 + X^2 for the missing value.

    Exactly one of the three arguments must be 0; that argument is the unknown
    being solved for, and the result is returned as a one-entry dict keyed by
    its name. Parameter names restored: the signature previously reused
    `lowercase_` for all three parameters, which is a SyntaxError, while the
    body referenced `resistance`/`reactance`/`impedance`.

    >>> UpperCamelCase(0, 4, 5)
    {'resistance': 3.0}
    >>> UpperCamelCase(3, 0, 5)
    {'reactance': 4.0}
    >>> UpperCamelCase(3, 4, 0)
    {'impedance': 5.0}
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run this module's embedded doctests when executed directly.
    import doctest
    doctest.testmod()
| 64 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    """Deprecated alias for :class:`PoolFormerImageProcessor`.

    Restored from the obfuscated `_a(__magic_name__)`: the deprecation message
    itself names `PoolFormerFeatureExtractor`, and `PoolFormerImageProcessor`
    (imported above) is the intended base class. The previous
    `def __init__(self, *A__, **A__)` reused the same name for both the
    var-positional and var-keyword parameters — a SyntaxError — and
    `warnings.warn` was missing its `FutureWarning` category.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 64 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    """Round-trip (save/load) and encoding tests for `CLIPSegProcessor`.

    Restored from the obfuscated `_a`: every method had been renamed `__A`
    (so later definitions shadowed earlier ones) and every attribute written
    in `setUp` had been assigned to a throwaway `A__` local, leaving
    `self.tmpdirname`, `self.vocab_file`, `self.merges_file` and
    `self.image_processor_file` undefined.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels-last, uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 64 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generate a (train, valid) pair of DataLoaders over y = a*x + b + noise.

    Renamed from `UpperCamelCase`: the test class and the `__main__` block in
    this file call `dummy_dataloaders`, and the original signature reused
    `lowercase_` for all five parameters — a SyntaxError. The shuffle flags
    (train=True, valid=False) follow the upstream accelerate test fixture —
    they were both obfuscated to `shuffle=lowercase_` here.
    """

    def get_dataset(n_batches):
        # Linear data with a small amount of Gaussian noise.
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Run `num_epochs` of MSE training; return one random draw per epoch.

    Renamed from `UpperCamelCase`: the test class and `__main__` block call
    `train`, and the original signature reused `lowercase_` for several
    parameters — a SyntaxError. The returned list of `random.random()` values
    is used by the checkpoint tests to verify that the RNG state is restored.
    """
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Scalar linear model y = a*x + b with learnable `a` and `b`.

    Renamed from the placeholder `_a`: the tests and `__main__` instantiate
    `DummyModel`. The two parameters must be stored as `self.a`/`self.b`
    (they were assigned to throwaway `A__` locals) and the forward hook must
    be called `forward` for `nn.Module.__call__` to dispatch to it.
    """

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    """Save/resume tests for `Accelerator.save_state` / `load_state`.

    Restored from the obfuscated `_a`: every method was named `__A` (so only
    the last would be collected), locals were assigned to `A__` while being
    referenced by their real names, and the annotated tuple targets
    `((A__), (A__)) : str = ...` were SyntaxErrors.
    """

    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save a second state: with total_limit=1 only one checkpoint may remain.
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        # Raw tensors (indices 0 and 1) cannot be registered for checkpointing;
        # the module and optimizer (indices 2 and 3) can.
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with total_limit=2 only the last two survive.
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        # Re-run this file under torchrun; the `__main__` block below exercises
        # optimizer `map_location` handling across devices.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Distributed smoke test driven by `CheckpointTest.test_map_location` via
    # torchrun. Names restored: the obfuscated `A_` assignments left `savedir`,
    # `model`, `optimizer`, `scheduler`, the dataloaders, `project_config`,
    # `accelerator` and `param_device` undefined.
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the intial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 64 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (in minutes) from one GH Actions job dict.

    Renamed from `UpperCamelCase`: `get_job_time` below calls
    `extract_time_from_single_job`. Locals restored — the obfuscated version
    assigned every value to `A__`, so `job_info`/`start`/`end` were undefined
    and the result dict was never populated.

    Assumes `job["started_at"]` / `job["completed_at"]` are parseable date
    strings (ISO-8601 from the GitHub API).
    """
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def get_job_time(workflow_run_id, token=None):
    """Fetch per-job timing info for a GitHub Actions workflow run.

    Renamed from `UpperCamelCase` (the `__main__` block calls `get_job_time`);
    the original signature reused `lowercase_` for both parameters — a
    SyntaxError — and `url`/`headers`/`job_time` were assigned to `A__` while
    being referenced by their real names.

    Returns a dict mapping job name -> timing dict from
    `extract_time_from_single_job`, or {} on any fetch/parse error
    (best-effort: the error is printed, not raised).
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # The first request returned up to 100 jobs; page through the rest.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()

    # Names restored: the obfuscated `A_` assignments left `args` and
    # `job_time` undefined at their use sites below.
    job_time = get_job_time(args.workflow_run_id)
    # Sort jobs by duration, longest first.
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'''{k}: {v["duration"]}''')
| 64 |
def UpperCamelCase(a: str, b: str) -> bool:
    """Return True if `a` can be turned into `b` by capitalizing some of its
    lowercase letters and deleting the remaining lowercase letters.

    Classic "string abbreviation" DP: dp[i][j] is True when the first i chars
    of `a` can produce the first j chars of `b`. Restored from the obfuscated
    version, whose signature reused `lowercase_` for both parameters (a
    SyntaxError) and whose dp-table writes were replaced by `A__ = True`
    assignments, so the table was never filled.

    >>> UpperCamelCase("daBcd", "ABC")
    True
    >>> UpperCamelCase("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Consume a[i] by matching it (upper-cased) against b[j] ...
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # ... or delete a[i] if it is lowercase.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    # Run this module's embedded doctests when executed directly.
    import doctest
    doctest.testmod()
| 64 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a `shape[0] x shape[1]` nested list of random floats in [0, scale).

    Renamed from `UpperCamelCase`: the test classes in this file call
    `floats_list`, and the original signature reused `lowercase_` for all
    four parameters — a SyntaxError. Falls back to the module-level
    `global_rng` when `rng` is None; `name` is accepted for API compatibility
    but unused (as in the original).
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    """Config/input factory used by the AST feature-extraction tests.

    Renamed from the placeholder `_a`: the test class below instantiates
    `ASTFeatureExtractionTester(self)`. The original `__init__` reused `A__`
    for every parameter (a SyntaxError) and never stored the values on
    `self`, breaking every `self.*` read in the methods.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Length step so that batch_size inputs span [min_seq_length, max_seq_length).
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    # NOTE(review): method name follows the SequenceFeatureExtractionTestMixin
    # convention; it is not called within this chunk — confirm against the mixin.
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Batching, dtype and integration tests for `ASTFeatureExtractor`.

    Restored from the obfuscated `_a(__magic_name__)`: the mixin base
    (imported above) was lost, the class attribute consumed via
    `self.feature_extraction_class` was obfuscated to `UpperCAmelCase__: int`,
    and the `__A` method names shadowed each other.
    """

    # Consumed by SequenceFeatureExtractionTestMixin and by the tests below.
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # float64 inputs must be down-cast to float32 by `pad`.
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 64 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a `shape[0] x shape[1]` nested list of random floats in [0, scale).

    Renamed from `UpperCamelCase`: the test classes in this file call
    `floats_list`, and the original signature reused `lowercase_` for all
    four parameters — a SyntaxError. Falls back to the module-level
    `global_rng` when `rng` is None; `name` is accepted for API compatibility
    but unused (as in the original).
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    """Config/input factory used by the AST feature-extraction tests.

    Renamed from the placeholder `_a`: the test class instantiates
    `ASTFeatureExtractionTester(self)`. The original `__init__` reused `A__`
    for every parameter (a SyntaxError) and never stored the values on
    `self`, breaking every `self.*` read in the methods.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Length step so that batch_size inputs span [min_seq_length, max_seq_length).
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    # NOTE(review): method name follows the SequenceFeatureExtractionTestMixin
    # convention; it is not called within this chunk — confirm against the mixin.
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
    """Feature-extraction test suite for ``ASTFeatureExtractor``.

    NOTE(review): the base placeholder ``__magic_name__`` is undefined here —
    presumably SequenceFeatureExtractionTestMixin (imported above); confirm.
    Also note that many locals are bound to the throwaway name ``A__`` while
    later statements read descriptive names (``feat_extract``,
    ``speech_inputs``, ``np_speech_inputs`` …); as written those reads raise
    NameError and the descriptive bindings need to be restored.
    """

    # Feature-extractor class consumed by the mixin's common tests.
    UpperCAmelCase__: int = ASTFeatureExtractor

    def __A ( self ):
        # Set-up hook: build the helper that supplies extractor kwargs/inputs.
        A__ : Optional[Any] = ASTFeatureExtractionTester(self )

    def __A ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]

        # Test not batched input
        A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )

        # Test batched
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        A__ : List[str] = np.asarray(A__ )
        A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )

    @require_torch
    def __A ( self ):
        # Padding must keep float32 dtype for both numpy and torch outputs.
        import torch

        A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # NOTE(review): `np.floataa` / `torch.floataa` look like mangled
        # `np.float32` / `torch.float32` — confirm.
        A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
        A__ : Tuple = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def __A ( self , A__ ):
        # Load `num_samples` decoded audio arrays from the dummy LibriSpeech split.
        from datasets import load_dataset

        A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]

        return [x["array"] for x in speech_samples]

    @require_torch
    def __A ( self ):
        # Integration test: spectrogram for one LibriSpeech sample must match
        # the reference slice below.
        # fmt: off
        A__ : Optional[Any] = torch.tensor(
            [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
            -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
            -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
            -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
        # fmt: on

        A__ : Any = self._load_datasamples(1 )
        A__ : Tuple = ASTFeatureExtractor()
        A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
        # Expected shape: one example, 1024 frames, 128 mel bins.
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 1 |
def UpperCamelCase (lowercase_: int ) -> bool:
    """Return True when the decimal digits of *lowercase_* form a palindrome."""
    digits = str(lowercase_)
    return digits == digits[::-1]
def UpperCamelCase (lowercase_: int ) -> int:
    """Return *lowercase_* plus the number formed by reversing its digits."""
    forward = int(lowercase_)
    backward = int(str(lowercase_)[::-1])
    return forward + backward
def UpperCamelCase (lowercase_: int = 10000 ) -> int:
    """Count Lychrel candidates below *lowercase_* (Project Euler 55).

    A number is assumed Lychrel when 50 rounds of "reverse and add" never
    produce a palindrome.  Returns the count of such numbers in
    ``range(1, lowercase_)``.

    BUG FIX: the original applied the reverse-and-add step to the upper bound
    (``sum_reverse(lowercase_)``) instead of the running value, so every
    number broke out of the loop immediately.  The helpers are inlined so the
    function works regardless of the module-level helper names.
    """

    def _sum_reverse(num: int) -> int:
        # One reverse-and-add step.
        return num + int(str(num)[::-1])

    def _is_palindrome(num: int) -> bool:
        digits = str(num)
        return digits == digits[::-1]

    lychrel_nums = []
    for num in range(1, lowercase_):
        iterations = 0
        current = num
        while iterations < 50:
            current = _sum_reverse(current)
            iterations += 1
            if _is_palindrome(current):
                break
        else:
            # No palindrome within 50 rounds: assume Lychrel.
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound in this module (the function above
    # is named `UpperCamelCase`), so running this file as a script raises
    # NameError — verify the intended entry-point name.
    print(f'''{solution() = }''')
| 64 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
    """GPT-2 based text decoder conditioned on a projected prefix embedding.

    NOTE(review): the three base placeholders ``__magic_name__`` are undefined
    here — the imports above suggest ModelMixin, ConfigMixin and
    ModuleUtilsMixin; confirm.  ``__init__`` repeats the parameter name
    ``A__`` (a SyntaxError) while the body reads descriptive names
    (``prefix_length``, ``n_embd`` …), and many locals are bound to the
    throwaway name ``A__`` but read back under descriptive names — those
    bindings must be restored for the class to run.
    """

    # Checkpoint keys ignored on load (causal-attention mask buffers).
    UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']

    @register_to_config
    def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
        # Build the optional prefix projection layers and the GPT-2 LM head.
        super().__init__()

        A__ : Union[str, Any] = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                # NOTE(review): the message interpolates `prefix_hidden_dim`
                # where `prefix_inner_dim` appears intended.
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
                F""" `n_embd`: {n_embd} are not equal.""" )

        A__ : str = prefix_inner_dim
        A__ : Optional[Any] = prefix_hidden_dim

        # Project prefix -> hidden dim only when a hidden dim is configured;
        # otherwise pass through unchanged.
        A__ : Tuple = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        A__ : int = (
            nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        A__ : Tuple = GPTaConfig(
            vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
        A__ : int = GPTaLMHeadModel(A__ )

    def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
        # Forward pass: embed the tokens, prepend the decoded prefix
        # embeddings, then run the GPT-2 transformer.
        A__ : List[str] = self.transformer.transformer.wte(A__ )
        A__ : int = self.encode_prefix(A__ )
        A__ : int = self.decode_prefix(A__ )
        A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )

        if labels is not None:
            # Dummy zero tokens pad the label sequence over the prefix span.
            A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def __A ( self , A__ , A__ ):
        # One zero "dummy" label token per prefix position.
        # NOTE(review): `torch.intaa` looks like a mangled `torch.int64`.
        return torch.zeros(A__ , self.prefix_length , dtype=torch.intaa , device=A__ )

    def __A ( self , A__ ):
        # Encode a raw prefix into the projection space.
        return self.encode_prefix(A__ )

    @torch.no_grad()
    def __A ( self , A__ , A__ , A__ ):
        # Generate one caption per CLIP feature (batch is split row by row).
        A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
        A__ : Optional[int] = []
        A__ : str = []
        for feature in features:
            A__ : Dict = self.decode_prefix(feature.to(A__ ) )  # back to the clip feature
            # Only support beam search for now
            A__ , A__ : Union[str, Any] = self.generate_beam(
                input_embeds=A__ , device=A__ , eos_token_id=A__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : int = torch.stack(A__ )
        A__ : List[Any] = torch.stack(A__ )
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
        # Beam search over the GPT-2 decoder: keep `beam_size` partial
        # sequences, freeze beams that emitted EOS, and finally return the
        # beams ordered by average log-probability.
        A__ : Any = eos_token_id
        A__ : Any = None
        A__ : Optional[int] = None
        A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
        A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )

        if input_embeds is not None:
            A__ : Dict = input_embeds
        else:
            A__ : str = self.transformer.transformer.wte(A__ )

        for i in range(A__ ):
            A__ : Dict = self.transformer(inputs_embeds=A__ )
            A__ : str = outputs.logits
            A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Any = logits.softmax(-1 ).log()

            if scores is None:
                # First step: seed the beams from the top-k next tokens.
                A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
                A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
                A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : Optional[Any] = next_tokens
                else:
                    A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
                    A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Later steps: extend live beams; stopped beams keep score 0
                # so they never grow.
                A__ : Optional[int] = -float(np.inf )
                A__ : List[Any] = 0
                A__ : str = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Dict = scores_sum / seq_lengths[:, None]
                A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
                A__ : Tuple = next_tokens // scores_sum.shape[1]
                A__ : Optional[Any] = seq_lengths[next_tokens_source]
                A__ : List[str] = next_tokens % scores_sum.shape[1]
                A__ : Optional[int] = next_tokens.unsqueeze(1 )
                A__ : int = tokens[next_tokens_source]
                A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
                A__ : str = generated[next_tokens_source]
                A__ : Optional[Any] = scores_sum_average * seq_lengths
                A__ : Union[str, Any] = is_stopped[next_tokens_source]

            A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
            A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
            if is_stopped.all():
                break

        # Order the finished beams by average per-token score.
        A__ : Dict = scores / seq_lengths
        A__ : Dict = scores.argsort(descending=A__ )
        # tokens tensors are already padded to max_seq_length
        A__ : Union[str, Any] = [tokens[i] for i in order]
        A__ : Any = torch.stack(A__ , dim=0 )
        A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 64 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
A_ : List[str] = 3
def UpperCamelCase (lowercase_: int ) -> int:
    """Sample candidates until one passes the quick primitive-root screen.

    A candidate ``g`` in ``[3, lowercase_)`` is rejected when ``g**2 == 1`` or
    ``g**lowercase_ == 1`` modulo ``lowercase_``.
    """
    print("Generating primitive root of p")
    while True:
        candidate = random.randrange(3, lowercase_)
        # Short-circuit keeps the evaluation order of the original two checks.
        if pow(candidate, 2, lowercase_) != 1 and pow(candidate, lowercase_, lowercase_) != 1:
            return candidate
def UpperCamelCase (lowercase_: int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair of the requested bit size.

    Args:
        lowercase_: key size in bits.

    Returns:
        ``((key_size, e_1, e_2, p), (key_size, d))`` — the public key and the
        private key.

    BUG FIX: the primitive root, the private exponent range and the modular
    arithmetic must all use the freshly generated prime ``p``; the original
    passed the key size everywhere instead.
    NOTE(review): ``primitive_root`` is not bound under that name in this
    module (the helper above is named ``UpperCamelCase``) — confirm the
    intended module layout.
    """
    print("""Generating prime p...""" )
    p = rabin_miller.generate_large_prime(lowercase_)  # select large prime number.
    e_a = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_a_inv = cryptomath.find_mod_inverse(pow(e_a, d, p), p)

    public_key = (lowercase_, e_a, e_a_inv, p)
    private_key = (lowercase_, d)

    return public_key, private_key
def UpperCamelCase (name: str , keysize: int ) -> None:
    """Write ``<name>_pubkey.txt`` / ``<name>_privkey.txt``, refusing to overwrite.

    Args:
        name: filename prefix for the two key files.
        keysize: key size in bits, forwarded to the key generator.

    Exits the process when either target file already exists.

    The original signature repeated the parameter name ``lowercase_`` (a
    SyntaxError) while the body read ``name``; names restored from that usage.
    NOTE(review): ``generate_key`` is not bound under that name in this module.
    """
    if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
        print("""\nWARNING:""" )
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            """Use a different name or delete these files and re-run this program.""" )
        sys.exit()

    public_key, private_key = generate_key(keysize)
    print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(f"""{name}_pubkey.txt""" , """w""" ) as fo:
        fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
    print(f"""Writing private key to file {name}_privkey.txt...""" )
    with open(f"""{name}_privkey.txt""" , """w""" ) as fo:
        fo.write(f"""{private_key[0]},{private_key[1]}""" )
def UpperCamelCase () -> None:
    """Command-line entry point: create the demo 2048-bit ElGamal key files."""
    print("Making key files...")
    # Writes elgamal_pubkey.txt / elgamal_privkey.txt via the helper above.
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
    # NOTE(review): `main` is not bound in this module (the entry point above
    # is named `UpperCamelCase`), so running this file as a script raises
    # NameError — verify the intended entry-point name.
    main()
| 64 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A_ : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class _a (datasets.BuilderConfig ):
    """BuilderConfig for the JSON / JSON-Lines builder below.

    NOTE(review): every field is declared under the same name
    ``UpperCAmelCase__`` — only the last declaration survives as a dataclass
    field — while the builder reads ``self.config.features``,
    ``self.config.encoding``, ``self.config.chunksize`` …  The original
    per-option field names need to be restored.
    """

    # Optional feature schema; produced tables are cast to it in _cast_table.
    UpperCAmelCase__: Optional[datasets.Features] = None
    # Text encoding used when decoding input files.
    UpperCAmelCase__: str = "utf-8"
    # Decoding error handling (None -> "strict").
    UpperCAmelCase__: Optional[str] = None
    # Top-level field holding the records, when the file is one JSON object.
    UpperCAmelCase__: Optional[str] = None
    UpperCAmelCase__: bool = True  # deprecated
    UpperCAmelCase__: Optional[int] = None  # deprecated
    # Bytes read per chunk when parsing JSON-Lines.
    UpperCAmelCase__: int = 10 << 20  # 10MB
    # Deprecated knob, rejected in _info when set.
    UpperCAmelCase__: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder for JSON and JSON-Lines files.

    NOTE(review): locals throughout this class are bound to the throwaway
    name ``A__`` while later statements read descriptive names (``splits``,
    ``dataset``, ``keys``, ``batch`` …); as written those reads raise
    NameError — the descriptive bindings need to be restored.
    """

    # Configuration class consumed by the datasets library.
    UpperCAmelCase__: List[str] = JsonConfig

    def __A ( self ):
        # _info(): validate deprecated config knobs, expose the feature schema.
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            A__ : Union[str, Any] = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
        if self.config.newlines_in_values is not None:
            raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
        return datasets.DatasetInfo(features=self.config.features )

    def __A ( self , A__ ):
        # _split_generators(): resolve config.data_files into per-split
        # generators.  A bare str/list becomes a single TRAIN split.
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        A__ : int = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(A__ , (str, list, tuple) ):
            A__ : Optional[Any] = data_files
            if isinstance(A__ , A__ ):
                A__ : List[str] = [files]
            A__ : int = [dl_manager.iter_files(A__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        A__ : List[str] = []
        for split_name, files in data_files.items():
            if isinstance(A__ , A__ ):
                A__ : Optional[int] = [files]
            A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
            splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
        return splits

    def __A ( self , A__ ):
        # _cast_table(): align a pyarrow table with the user-provided features.
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
                A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
        return pa_table

    def __A ( self , A__ ):
        # _generate_tables(): yield (key, pa.Table) pairs from each input file,
        # either from one JSON object (config.field set) or from JSON-Lines
        # chunks parsed by pyarrow with an adaptive block size.
        for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    A__ : Optional[Any] = json.load(A__ )
                # We keep only the field we are interested in
                A__ : Optional[int] = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(A__ , (list, tuple) ):
                    A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
                    A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
                else:
                    A__ : Any = dataset
                A__ : Any = pa.Table.from_pydict(A__ )
                yield file_idx, self._cast_table(A__ )
            # If the file has one json object per line
            else:
                with open(A__ , """rb""" ) as f:
                    A__ : List[str] = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
                    A__ : Any = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        A__ : Dict = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(A__ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
                        try:
                            while True:
                                try:
                                    A__ : str = paj.read_json(
                                        io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    # A record "straddling" the block boundary
                                    # means the block was too small: retry with
                                    # a doubled block size; otherwise re-raise.
                                    if (
                                        isinstance(A__ , pa.ArrowInvalid )
                                        and "straddling" not in str(A__ )
                                        or block_size > len(A__ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Fallback: the file may be a single JSON list of
                            # objects rather than JSON-Lines.
                            try:
                                with open(
                                    A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    A__ : Optional[Any] = json.load(A__ )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(A__ , A__ ):  # list is the only sequence type supported in JSON
                                try:
                                    A__ : str = set().union(*[row.keys() for row in dataset] )
                                    A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
                                    A__ : int = pa.Table.from_pydict(A__ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(A__ )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(A__ )
                        batch_idx += 1
| 64 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _a (__magic_name__ ):
    """Config tester: checks that Levit-specific fields exist on the config."""

    def __A ( self ):
        # Build a config from the tester's stock kwargs and assert that the
        # Levit-specific attributes made it onto the object.
        config = self.config_class(**self.inputs_dict)
        for required_attribute in ("hidden_sizes", "num_attention_heads"):
            self.parent.assertTrue(hasattr(config, required_attribute))
class _a :
    """Builds small LevitConfig objects and dummy inputs for the model tests.

    Restored from the broken original: the ``__init__`` signature repeated the
    parameter name ``A__`` (a SyntaxError) — names are mapped back onto the
    attribute reads in the method bodies; the list defaults are guarded with
    ``None`` sentinels so they are no longer shared across instances; the
    annotated tuple-unpack SyntaxErrors are fixed; and the helper methods get
    back the names the test class below calls (``prepare_config_and_inputs``,
    ``get_config``, …).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=None,
        num_attention_heads=None,
        depths=None,
        key_dim=None,
        drop_path_rate=0,
        mlp_ratio=None,
        attention_ratio=None,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        # None sentinels keep the historical list defaults from being mutated
        # across tester instances.
        if hidden_sizes is None:
            hidden_sizes = [128, 256, 384]
        if num_attention_heads is None:
            num_attention_heads = [4, 6, 8]
        if depths is None:
            depths = [2, 3, 4]
        if key_dim is None:
            key_dim = [16, 16, 16]
        if mlp_ratio is None:
            mlp_ratio = [2, 2, 2]
        if attention_ratio is None:
            attention_ratio = [2, 2, 2]
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Two subsampling stages between the three hidden sizes.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny Levit model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Materialize a LevitConfig from the tester's attributes."""
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward pass check: last_hidden_state must match the downsampled size."""
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Four strided convolutions in the patch embedding shrink H and W.
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head check: logits shaped (batch, num_labels)."""
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by the common test mixin: returns (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
    """Model-level test suite for Levit.

    NOTE(review): the two base placeholders ``__magic_name__`` are undefined —
    the imports above suggest ModelTesterMixin and PipelineTesterMixin;
    confirm.  Throughout the suite locals are bound to the throwaway name
    ``A__`` while later statements read descriptive names (``model``,
    ``inputs_dict`` …), and several tuple targets carry annotations (a
    SyntaxError) — those bindings need to be restored.
    """

    # Model classes exercised by the common mixin tests.
    UpperCAmelCase__: Union[str, Any] = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class map for the pipeline mixin.
    UpperCAmelCase__: Union[str, Any] = (
        {
            '''feature-extraction''': LevitModel,
            '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the common mixin (all disabled for Levit).
    UpperCAmelCase__: List[str] = False
    UpperCAmelCase__: List[Any] = False
    UpperCAmelCase__: int = False
    UpperCAmelCase__: Optional[int] = False
    UpperCAmelCase__: Dict = False

    def __A ( self ):
        # setUp: build the model tester and a ConfigTester for LevitConfig.
        A__ : Dict = LevitModelTester(self )
        A__ : Optional[int] = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )

    def __A ( self ):
        # Exercise the common config serialization round-trips.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __A ( self ):
        # Intentional no-op hook (common-properties check is covered above).
        return

    @unittest.skip(reason="""Levit does not use inputs_embeds""" )
    def __A ( self ):
        pass

    @unittest.skip(reason="""Levit does not support input and output embeddings""" )
    def __A ( self ):
        pass

    @unittest.skip(reason="""Levit does not output attentions""" )
    def __A ( self ):
        pass

    def __A ( self ):
        # The forward signature must expose `pixel_values` as first argument.
        A__ , A__ : Any = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A__ : str = model_class(A__ )
            A__ : List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ : str = [*signature.parameters.keys()]

            A__ : Optional[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , A__ )

    def __A ( self ):
        # hidden_states: one entry per stage plus the embedding output; the
        # first entry is shaped (batch, H*W after four downsamplings,
        # hidden_sizes[0]).
        def check_hidden_states_output(A__ , A__ , A__ ):
            A__ : str = model_class(A__ )
            model.to(A__ )
            model.eval()

            with torch.no_grad():
                A__ : List[Any] = model(**self._prepare_for_class(A__ , A__ ) )
            A__ : List[Any] = outputs.hidden_states

            A__ : int = len(self.model_tester.depths ) + 1
            self.assertEqual(len(A__ ) , A__ )

            A__ : List[Any] = (self.model_tester.image_size, self.model_tester.image_size)
            A__ , A__ : List[str] = image_size[0], image_size[1]
            for _ in range(4 ):
                A__ : str = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                A__ : List[str] = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )

        A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A__ : Union[str, Any] = True
            check_hidden_states_output(A__ , A__ , A__ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A__ : Dict = True

            check_hidden_states_output(A__ , A__ , A__ )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def __A ( self ):
        pass

    def __A ( self , A__ , A__ , A__=False ):
        # The teacher-distilled class is inference-only: drop labels for it.
        A__ : Tuple = super()._prepare_for_class(A__ , A__ , return_labels=A__ )

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def __A ( self ):
        # Smoke test for the base model forward shape.
        A__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )

    def __A ( self ):
        # Smoke test for the classification head.
        A__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__ )

    def __A ( self ):
        # Training smoke test: loss.backward() must run for trainable classes.
        if not self.model_tester.is_training:
            return

        A__ , A__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        A__ : Dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(A__ )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            A__ : Dict = model_class(A__ )
            model.to(A__ )
            model.train()
            A__ : Union[str, Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ )
            A__ : List[Any] = model(**A__ ).loss
            loss.backward()

    def __A ( self ):
        # Training smoke test with gradient checkpointing enabled.
        A__ , A__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        A__ : Dict = False
        A__ : Any = True

        for model_class in self.all_model_classes:
            if model_class in get_values(A__ ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            A__ : List[Any] = model_class(A__ )
            model.gradient_checkpointing_enable()
            model.to(A__ )
            model.train()
            A__ : Optional[Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ )
            A__ : Any = model(**A__ ).loss
            loss.backward()

    def __A ( self ):
        # problem_type matrix: losses must compute without target-size
        # broadcast warnings for each classification/regression mode.
        A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        A__ : Union[str, Any] = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(A__ ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
                    A__ : Any = problem_type["""title"""]
                    A__ : Any = problem_type["""num_labels"""]

                    A__ : List[Any] = model_class(A__ )
                    model.to(A__ )
                    model.train()

                    A__ : Optional[Any] = self._prepare_for_class(A__ , A__ , return_labels=A__ )

                    if problem_type["num_labels"] > 1:
                        A__ : Any = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )

                    A__ : Dict = inputs["""labels"""].to(problem_type["""dtype"""] )

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=A__ ) as warning_list:
                        A__ : Dict = model(**A__ ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}""" )

                    loss.backward()

    @slow
    def __A ( self ):
        # Published checkpoints must load without error.
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Union[str, Any] = LevitModel.from_pretrained(A__ )
            self.assertIsNotNone(A__ )
def UpperCamelCase ():
    """Load the COCO two-cats fixture image used by the integration test below.

    Fixes from the original: the opened image is bound to the name the
    ``return`` statement reads, and the broken ``-> Dict`` annotation
    (``Dict`` is not imported in this module) is removed.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _a (unittest.TestCase ):
    """Slow integration test: the pretrained Levit checkpoint must reproduce
    reference ImageNet logits on the fixture image.

    NOTE(review): locals are bound to the throwaway name ``A__`` while later
    statements read descriptive names (``model``, ``inputs`` …), and the
    ``.to(A__ )`` calls presumably stand for ``torch_device`` — as written
    these raise NameError.
    """

    @cached_property
    def __A ( self ):
        # Default image processor built from the first published checkpoint.
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def __A ( self ):
        A__ : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            A__ )

        A__ : Optional[int] = self.default_image_processor
        A__ : List[Any] = prepare_img()
        A__ : Dict = image_processor(images=A__ , return_tensors="""pt""" ).to(A__ )

        # forward pass
        with torch.no_grad():
            A__ : Optional[int] = model(**A__ )

        # verify the logits
        A__ : List[Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , A__ )

        A__ : Any = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(A__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1e-4 ) )
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
# Collect the repository file list and report naming violations.  The original
# bound every intermediate list to the single name `A_`, so the reads below
# (`upper_files`, `space_files`, …) raised NameError; the names are restored
# from that usage.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Files whose names contain uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

# Files whose names contain spaces.
space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

# Files whose names contain hyphens.
hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

# Files that sit at the repository root (no directory separator).
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

# Exit non-zero with the violation count so CI fails on any offender.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 64 | 1 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[Any] , lowercase_: Any , lowercase_: int ) -> Optional[int]:
    """Run an untrusted program in a subprocess with a wall-clock timeout and
    report whether it passed.

    NOTE(review): the four parameters are all bound to the same name
    ``lowercase_`` (a SyntaxError) while the body reads ``check_program``,
    ``timeout``, ``task_id`` and ``completion_id`` — restore those names.
    The ``target=lowercase_`` below presumably stands for the sandboxed
    executor function; confirm against the upstream human-eval source.
    """
    # Manager-backed list so the child process can report its result.
    A__ : int = multiprocessing.Manager()
    A__ : Optional[Any] = manager.list()

    A__ : List[str] = multiprocessing.Process(target=lowercase_ , args=(check_program, result, timeout) )
    p.start()
    # Give the child one extra second beyond its own internal time limit.
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()

    # An empty result means the child never reported back: treat as timeout.
    if not result:
        result.append("""timed out""" )

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def unsafe_execute(check_program, result, timeout):
    """Execute *check_program* in a throwaway directory and record a verdict.

    Appends exactly one of ``"passed"``, ``"timed out"`` or ``"failed: ..."``
    to *result*. Relies on the sibling helpers ``create_tempdir``,
    ``reliability_guard``, ``swallow_io``, ``time_limit`` and
    ``TimeoutException`` defined in this module (their names are what the
    original body already referenced).
    """
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("""passed""")
        except TimeoutException:
            result.append("""timed out""")
        except BaseException as e:
            result.append(f"""failed: {e}""")
        # Needed for cleaning up: restore the functions reliability_guard()
        # nulled out so TemporaryDirectory teardown can run.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    """Raise ``TimeoutException`` if the guarded block runs longer than *seconds*.

    Uses ``SIGALRM`` + ``setitimer`` (Unix only, main thread only). The
    obfuscated original gave the handler two identical parameter names (a
    SyntaxError) and never passed the handler to ``signal.signal``.
    """

    def signal_handler(signum, frame):
        raise TimeoutException("""Timed out!""")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        # Always cancel the pending alarm, even when the body raised.
        signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def swallow_io():
    """Silence stdout/stderr and make stdin unreadable for the guarded block.

    All three standard streams are pointed at a single write-only buffer so a
    sandboxed program can print freely but never read input.
    """
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    """Create a temporary directory, chdir into it, and clean everything up on exit.

    Yields the directory path; relies on the sibling ``chdir`` context manager
    to restore the previous working directory.
    """
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    """Raised by ``time_limit`` when a guarded block exceeds its allotted time.

    The obfuscated original inherited from the undefined ``__magic_name__``;
    the sibling code raises and catches this type as an exception, so the base
    is restored to ``Exception``.
    """

    pass
class WriteOnlyStringIO(io.StringIO):
    """A StringIO that can be written to but never read from.

    Used to swallow a sandboxed program's stdout/stderr while guaranteeing the
    program cannot read anything back. The obfuscated original collapsed all
    four overrides onto one method name, so only the last survived; the
    intended ``read``/``readline``/``readlines``/``readable`` overrides are
    restored.
    """

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        # Tell the io machinery this stream can never be read.
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    """Context manager that temporarily replaces ``sys.stdin``.

    ``contextlib._RedirectStream`` implements the save/replace/restore logic
    (it backs ``redirect_stdout``/``redirect_stderr``); a subclass only names
    the target stream. The obfuscated original renamed the ``_stream`` class
    attribute, which silently broke the redirection.
    """

    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    """Temporarily change the working directory to *root*.

    ``"."`` is a fast no-op. The obfuscated original restored *root* again in
    the ``finally`` clause instead of the saved directory, stranding the
    process; the saved cwd is now restored. The pointless
    ``except BaseException: raise`` was dropped — ``finally`` already runs on
    any exception.
    """
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """Disable destructive process-level functionality before running untrusted code.

    WARNING: this deliberately cripples the *calling* process (os/shutil/
    subprocess functions are nulled out) and must only be run inside a
    sacrificial subprocess. In the obfuscated original every assignment
    targeted a throwaway local, making the whole guard a no-op; the intended
    attribute assignments are restored here (list per the upstream OpenAI
    human-eval execution guard).

    Args:
        maximum_memory_bytes: optional rlimit applied to address space, data
            segment and (except on macOS) stack.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            # RLIMIT_STACK cannot be raised/set this way on macOS.
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
    faulthandler.disable()
    import builtins

    builtins.exit = None
    builtins.quit = None
    import os

    os.environ["OMP_NUM_THREADS"] = """1"""
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess

    subprocess.Popen = None  # type: ignore
    # NOTE(review): upstream nulls the builtin help via the __builtins__
    # mapping; in a package context __builtins__ may be a module — confirm.
    __builtins__["help"] = None
    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 64 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    """Warn about (and pop) deprecated arguments or attributes.

    Each positional item is an ``(attribute, version_name, message)`` tuple.
    The obfuscated original repeated one parameter name (a SyntaxError);
    keyword names are restored from the names the body reads.

    Args:
        *args: one tuple, or several ``(attribute, version_name, message)``
            tuples, describing deprecations.
        take_from: dict of kwargs (popped in place) or object whose attribute
            holds the deprecated value; ``None`` emits a plain warning.
        standard_warn: prepend the standard "is deprecated" sentence.
        stacklevel: forwarded to ``warnings.warn``.

    Returns:
        The popped value, a tuple of values, or ``None`` when nothing was taken.

    Raises:
        ValueError: when the library version already passed *version_name*.
        TypeError: when unexpected keys remain in a dict *take_from*.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        # The deprecation must be deleted once the library reaches version_name.
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                f""" version {__version__} is >= {version_name}""" )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    # Any keys left in a dict take_from were not declared deprecated: report
    # them like Python's own unexpected-keyword TypeError, at the caller.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 64 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool backed by an NLI model.

    The obfuscated original collapsed every class attribute and method onto a
    single name, so only the last definition of each survived and
    ``self.entailment_id`` was never assigned; the attribute/method names are
    restored to the ``PipelineTool`` contract (``setup``/``encode``/``decode``)
    that the imported base class and pipeline attributes imply.
    """

    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']

    def setup(self):
        """Load model/tokenizer, then locate the "entailment" label id in the config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail"""):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""")

    def encode(self, text, labels):
        """Tokenize *text* paired with one NLI hypothesis per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"""This example is {label}""" for label in labels], return_tensors="""pt""", padding="""max_length""", )

    def decode(self, outputs):
        """Return the label whose hypothesis scored highest on the entailment logit."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 64 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename one flax parameter key/tensor pair to PyTorch conventions.

    The obfuscated original reused one parameter name twice (a SyntaxError);
    names are restored from the identifiers the body reads, and the function
    name matches its call site in ``shard_on_the_fly``.

    Returns:
        ``(flax_key_tuple, flax_tensor)`` with ``kernel``/``scale``/
        ``embedding`` leaves mapped to ``weight`` and tensors transposed or
        permuted to PyTorch layout.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: (experts, in, out) -> (experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer: transpose (in, out) -> (out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split one flattened t5x checkpoint key into (layer name, subkey, content).

    The obfuscated original reused one parameter name three times (a
    SyntaxError); names are restored from the identifiers the body reads and
    from the call site in ``shard_on_the_fly``.

    Returns:
        ``(curr_real_layer_name, split_layer, content)`` where *content* is a
        resolved path for ``kvstore/path`` keys, the literal ``"file"`` for
        ``kvstore/driver`` keys, and the raw checkpoint value otherwise.
    """
    if "metadata" in layer:
        split_layer = layer.split("""metadata""")
        curr_real_layer_name = """""".join(split_layer[0])[:-1]
        split_layer = [tuple(("""metadata""" + split_layer[1]).split("""/"""))]
    elif "kvstore" in layer:
        split_layer = layer.split("""kvstore""")
        curr_real_layer_name = """""".join(split_layer[0])[:-1]
        split_layer = [tuple(("""kvstore""" + split_layer[1]).split("""/"""))]
    else:
        split_layer = layer.split("""/""")
        curr_real_layer_name = """/""".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = """file"""
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    """Rename a block of weights and save it as one PyTorch shard file.

    The obfuscated original reused one parameter name twice (a SyntaxError)
    and dropped the ``/`` -> ``.`` key rewrite; both are restored. The name
    matches the call sites inside ``shard_on_the_fly``.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # t5x uses "/"-separated paths; PyTorch state dicts use ".".
        new_current_block[k.replace("/", ".")] = v
    torch.save(new_current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name=WEIGHTS_NAME):
    """Stream a t5x Switch Transformers checkpoint into sharded PyTorch weights.

    The obfuscated original reused one parameter name five times (a
    SyntaxError); names are restored from the identifiers the body reads and
    from the ``__main__`` call site.

    Args:
        switch_checkpoint_path: directory holding the t5x ``checkpoint`` file.
        dump_path: output directory for the ``.bin`` shards and index.
        max_shard_size: human-readable size limit per shard (e.g. ``"10GB"``).
        dtype: name of a torch dtype attribute to cast weights to.
        weights_name: base file name for shards.

    Returns:
        ``({weights_name: keys}, None)`` for a single shard, otherwise
        ``(metadata, index)`` after writing the weight-map index file.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + """/checkpoint""", """rb""") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["""optimizer"""]["""target"""]
        checkpoint_info = flatten_dict(checkpoint_info, sep="""/""")
    # Group tensorstore specs by the real layer they belong to.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("""/""")), raw_weights)
        key = """/""".join(key_tuple)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(""".bin""", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(""".bin""", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            """.bin""", f"""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin""")  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(""".bin""", f"""-{idx+1:05d}-of-???.bin"""))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"""total_size""": total_size}
    index = {"""metadata""": metadata, """weight_map""": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), """w""", encoding="""utf-8""") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + """\n"""
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    # CLI entry point for the Switch Transformers conversion. The obfuscated
    # original bound the parser and parsed args to throwaway names while the
    # code below read `parser`/`args` (NameError), and read the misspelled
    # `switch_tax_checkpoint_path` attribute; both are fixed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def UpperCamelCase() -> int:
    """Manual smoke test: convert, reload and generate with a small Switch model.

    Downloads checkpoints and writes to a hard-coded path — intended only for
    interactive debugging. The obfuscated original referenced an undefined
    name for the prompt and bound every local to a throwaway variable; locals
    are restored.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    config = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""")
    config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        """/home/arthur_huggingface_co/transformers/switch_converted""", device_map="""auto""")
    tokenizer = TaTokenizer.from_pretrained("""t5-small""")
    text = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
    input_ids = tokenizer(text, return_tensors="""pt""").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 64 | 1 |
class EditDistance:
    """Levenshtein edit distance, solved top-down (memoized) and bottom-up.

    The obfuscated original named the class ``_a`` while the ``__main__``
    block instantiates ``EditDistance`` and calls ``min_dist_top_down`` /
    ``min_dist_bottom_up``; it also collapsed all methods onto one name and
    compared the first word against itself. All of that is restored here.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []  # memo table, dp[m][n] = distance of word1[:m+1] vs word2[:n+1]

    def __min_dist_top_down_dp(self, m, n):
        # Base cases: an empty prefix costs the other prefix's length.
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert_cost = self.__min_dist_top_down_dp(m, n - 1)
                delete_cost = self.__min_dist_top_down_dp(m - 1, n)
                replace_cost = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert_cost, delete_cost, replace_cost)
            return self.dp[m][n]

    def min_dist_top_down(self, word1, word2):
        """Return the edit distance using memoized recursion."""
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1, word2):
        """Return the edit distance using iterative tabulation."""
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert_cost = self.dp[i][j - 1]
                    delete_cost = self.dp[i - 1][j]
                    replace_cost = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert_cost, delete_cost, replace_cost)
        return self.dp[m][n]
if __name__ == "__main__":
    # Interactive driver: read two strings and print the distance computed by
    # both algorithms. The obfuscated original referenced undefined names
    # (`EditDistance`, `solver`, the input variables); restored here.
    solver = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    first_word = input('Enter the first string: ').strip()
    second_word = input('Enter the second string: ').strip()
    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(first_word, second_word)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(first_word, second_word)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-module bootstrap for the BARTpho tokenizer. The obfuscated original
# bound both the structure dict and the lazy module to throwaway names while
# the final line read `_import_structure` (NameError); restored to the
# standard transformers lazy-import pattern.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ['BartphoTokenizer']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _a (unittest.TestCase ):
    """Unit tests for diffusers' ``get_activation`` factory.

    The obfuscated original named every test ``__A`` (so only the last
    survived and none matched unittest's ``test_`` discovery) and used the
    nonexistent ``torch.floataa`` dtype; restored to ``test_*`` names and
    ``torch.float32``.
    """

    def test_swish(self):
        act = get_activation("""swish""")
        self.assertIsInstance(act, nn.SiLU)
        # SiLU saturates to 0 for very negative inputs and is ~identity for large ones.
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("""silu""")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("""mish""")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("""gelu""")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 64 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Official OpenAI Whisper checkpoint URLs, keyed by model name. The second-to-
# last URL path component is the file's SHA-256 checksum (verified on
# download). The obfuscated original bound this dict to a throwaway name while
# the converter reads `_MODELS`; restored.
_MODELS = {
    'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
    'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
    'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
    'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
    'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
    'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
    'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
    'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
    'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
    'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    """Drop top-level bookkeeping keys from a Whisper state dict, in place.

    The obfuscated original called ``state_dict.pop(state_dict, state_dict)``
    (TypeError: dicts are unhashable); it now pops each ignore key with a
    ``None`` default so missing keys are tolerated. The trailing underscore in
    the name (matching the call site) follows the in-place convention.
    """
    ignore_keys = ["""layers""", """blocks"""]
    for k in ignore_keys:
        state_dict.pop(k, None)
# Substring rewrites mapping OpenAI Whisper state-dict key fragments to the
# Transformers naming scheme; applied by `rename_keys` below. The obfuscated
# original bound this dict to a throwaway name while `rename_keys` reads
# `WHISPER_MAPPING`; restored.
WHISPER_MAPPING = {
    'blocks': 'layers',
    'mlp.0': 'fc1',
    'mlp.2': 'fc2',
    'mlp_ln': 'final_layer_norm',
    '.attn.query': '.self_attn.q_proj',
    '.attn.key': '.self_attn.k_proj',
    '.attn.value': '.self_attn.v_proj',
    '.attn_ln': '.self_attn_layer_norm',
    '.attn.out': '.self_attn.out_proj',
    '.cross_attn.query': '.encoder_attn.q_proj',
    '.cross_attn.key': '.encoder_attn.k_proj',
    '.cross_attn.value': '.encoder_attn.v_proj',
    '.cross_attn_ln': '.encoder_attn_layer_norm',
    '.cross_attn.out': '.encoder_attn.out_proj',
    'decoder.ln.': 'decoder.layer_norm.',
    'encoder.ln.': 'encoder.layer_norm.',
    'token_embedding': 'embed_tokens',
    'encoder.positional_embedding': 'encoder.embed_positions.weight',
    'decoder.positional_embedding': 'decoder.embed_positions.weight',
    'ln_post': 'layer_norm',
}
def rename_keys(s_dict):
    """Rewrite OpenAI Whisper state-dict keys to Transformers names, in place.

    Applies every substring rewrite from ``WHISPER_MAPPING`` to each key and
    moves the value to the new key. The obfuscated original dropped both the
    ``new_key`` accumulation and the final reinsertion; restored. Returns the
    (mutated) dict for convenience.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing an embedding's weight data.

    Used to tie the LM head to the token embedding. The obfuscated original
    renamed the parameter away from ``emb``, which the body reads; restored,
    and the name matches the call site in the converter.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share storage with the embedding so the weights stay tied.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = None) -> bytes:
    """Download *url* into *root*, verifying its SHA-256 checksum; return the bytes.

    The checksum is embedded as the second-to-last URL path component (see
    ``_MODELS``). A cached file with a matching checksum is returned without
    re-downloading. The obfuscated original reused one parameter name twice
    (a SyntaxError) and called the nonexistent ``hashlib.shaaaa``; both fixed.

    Args:
        url: source URL.
        root: cache directory; defaults to ``~/.cache/whisper`` because the
            converter's call site passes only the URL.
    """
    if root is None:
        root = os.path.join(os.path.expanduser("~"), ".cache", "whisper")
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("""/""")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, """rb""").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""")
    with urllib.request.urlopen(url) as source, open(download_target, """wb""") as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""")), ncols=80, unit="""iB""", unit_scale=True, unit_divisor=1024) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, """rb""").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""")
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint to a Transformers Whisper model.

    *checkpoint_path* is either a local ``.pt`` file or a model name looked up
    in ``_MODELS`` and downloaded. The obfuscated original reused one
    parameter name twice (a SyntaxError) and bound every local to a throwaway
    variable; locals are restored from the names the body reads.
    """
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="""cpu""")
    dimensions = original_checkpoint["""dims"""]
    state_dict = original_checkpoint["""model_state_dict"""]
    # Kept so the LM head can be set from the token embedding below.
    proj_out_weights = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["""n_mels"""],
        d_model=dimensions["""n_audio_state"""],
        max_target_positions=dimensions["""n_text_ctx"""],
        encoder_layers=dimensions["""n_audio_layer"""],
        encoder_attention_heads=dimensions["""n_audio_head"""],
        decoder_layers=dimensions["""n_text_layer"""],
        decoder_attention_heads=dimensions["""n_text_state"""],
        max_source_positions=dimensions["""n_audio_ctx"""],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f""" but all the following weights are missing {missing}""")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the Whisper conversion. The obfuscated original
    # bound the parser and the parsed args to throwaway names while the code
    # below read `parser`/`args` (NameError); restored.
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 64 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput(BaseOutput):
    """Output of the 1D UNet's forward pass.

    The class name is restored from the ``UNetaDOutput(sample=...)`` reference
    in the sibling model's forward, and the base from the ``BaseOutput``
    import above (the obfuscated original used an undefined base name).
    """

    # Denoised sample tensor; presumably (batch, channels, length) — confirm
    # against the 1D down/up blocks.
    sample: torch.FloatTensor
class UNetaDModel(ModelMixin, ConfigMixin):
    """A 1D UNet taking a noisy sample and a timestep, returning a sample-shaped output.

    The obfuscated original gave every ``__init__`` parameter the same name
    (a SyntaxError) and bound locals to throwaway variables; parameter and
    local names are reconstructed from the keyword arguments and defaults
    preserved in the body. Bases are restored from the ``ModelMixin`` /
    ``ConfigMixin`` imports above.
    """

    @register_to_config
    def __init__(
        self,
        sample_size=6_5536,
        sample_rate=None,
        in_channels=2,
        out_channels=2,
        extra_in_channels=0,
        time_embedding_type="fourier",
        flip_sin_to_cos=True,
        use_timestep_embedding=False,
        freq_shift=0.0,
        down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type="UNetMidBlock1D",
        out_block_type=None,
        block_out_channels=(32, 32, 64),
        act_fn=None,
        norm_num_groups=8,
        layers_per_block=1,
        downsample_each_block=False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0], )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(up_block_types) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, )

    def forward(self, sample, timestep, return_dict=True, ):
        """Run the UNet; returns ``UNetaDOutput`` or a ``(sample,)`` tuple."""
        # 1. time: accept python numbers, 0-d tensors or batched tensors.
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            # Broadcast the raw projection across the sample's length axis.
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up: consume the residual stack in reverse order.
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNetaDOutput(sample=sample)
| 64 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Opt in to deterministic execution for the tests in this module (helper
# imported from diffusers' testing utilities above).
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
    """Fast checks for the text-to-video diffusion pipeline, built from tiny
    randomly-initialised components so every test runs in seconds on CPU.

    NOTE(review): local bindings in this file look machine-obfuscated (repeated
    ``A__``/``UpperCAmelCase__`` assignments) while later references use the
    original names (``unet``, ``frames``, ...) — verify against the upstream
    file before relying on this module.
    """

    # Pipeline class under test plus the parameter sets driven by the shared mixin.
    UpperCAmelCase__: Any = TextToVideoSDPipeline
    UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
    UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    UpperCAmelCase__: Optional[int] = frozenset(
        [
            '''num_inference_steps''',
            '''generator''',
            '''latents''',
            '''return_dict''',
            '''callback''',
            '''callback_steps''',
        ] )

    def __A ( self ):
        """Build the tiny UNet/scheduler/VAE/text-encoder/tokenizer bundle used
        to instantiate the pipeline quickly."""
        torch.manual_seed(0 )
        A__ : Optional[int] = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
        A__ : Optional[int] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        A__ : Union[str, Any] = CLIPTextModel(A__ )
        A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        A__ : Dict = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components

    def __A ( self , A__ , A__=0 ):
        """Return deterministic call kwargs: seeded generator, short prompt,
        two inference steps, tensor output."""
        if str(A__ ).startswith("""mps""" ):
            A__ : Tuple = torch.manual_seed(A__ )
        else:
            A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
        A__ : List[str] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """pt""",
        }
        return inputs

    def __A ( self ):
        """Two-step CPU run: check the output frame shape and a pixel slice
        against hard-coded reference values."""
        A__ : List[str] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        A__ : Union[str, Any] = self.get_dummy_components()
        A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
        A__ : int = sd_pipe.to(A__ )
        sd_pipe.set_progress_bar_config(disable=A__ )
        A__ : int = self.get_dummy_inputs(A__ )
        A__ : int = """np"""
        A__ : Any = sd_pipe(**A__ ).frames
        A__ : Dict = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __A ( self ):
        """Attention slicing must not change outputs beyond tolerance."""
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __A ( self ):
        """xFormers attention must match the vanilla forward pass."""
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )

    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        """Skipped: batched inference is not supported for this pipeline."""
        pass

    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        """Skipped: batched inference is not supported for this pipeline."""
        pass

    @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def __A ( self ):
        """Skipped: `num_images_per_prompt` is not supported."""
        pass

    def __A ( self ):
        """Delegate to the shared mixin's progress-bar test."""
        return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
    """Slow integration tests: run the full damo-vilab text-to-video model on
    CUDA and compare the generated frames against reference videos stored on
    the Hub (mean absolute difference below 5e-2)."""

    def __A ( self ):
        """25-step generation with a DPM-Solver scheduler vs. the reference video."""
        A__ : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
        A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        A__ : int = pipe.to("""cuda""" )
        A__ : Optional[Any] = """Spiderman is surfing"""
        A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
        A__ : Dict = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2

    def __A ( self ):
        """Two-step smoke generation vs. the matching two-step reference video."""
        A__ : List[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
        A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        A__ : List[str] = pipe.to("""cuda""" )
        A__ : Dict = """Spiderman is surfing"""
        A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
        A__ : Optional[int] = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 64 | 1 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _a (__magic_name__ ):
    """Processor bundling the M-CTC-T feature extractor with a tokenizer.

    ``__call__`` and ``pad`` route audio kwargs to the feature extractor and
    text kwargs to the tokenizer; when both are supplied, tokenized ids are
    attached to the audio features as ``labels``. ``as_target_processor``
    keeps the deprecated target-context workflow working.

    NOTE(review): local bindings appear machine-obfuscated (every assignment
    targets ``A__``) while the bodies read the original names (``kwargs``,
    ``audio``, ``inputs``, ...) — verify against the upstream file.
    """

    # Component class names instantiated by ProcessorMixin.
    UpperCAmelCase__: str = '''MCTCTFeatureExtractor'''
    UpperCAmelCase__: Any = '''AutoTokenizer'''

    def __init__( self , A__ , A__ ):
        """Store both components; processing starts in feature-extractor mode."""
        super().__init__(A__ , A__ )
        A__ : str = self.feature_extractor
        A__ : Optional[Any] = False

    def __call__( self , *A__ , **A__ ):
        """Prepare audio and/or text inputs; at least one must be given."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*A__ , **A__ )
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            A__ : int = kwargs.pop("""raw_speech""" )
        else:
            A__ : Union[str, Any] = kwargs.pop("""audio""" , A__ )
        A__ : Dict = kwargs.pop("""sampling_rate""" , A__ )
        A__ : Union[str, Any] = kwargs.pop("""text""" , A__ )
        # First positional argument (if any) is treated as the audio input.
        if len(A__ ) > 0:
            A__ : List[str] = args[0]
            A__ : Optional[int] = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            A__ : List[Any] = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ )
        if text is not None:
            A__ : Optional[Any] = self.tokenizer(A__ , **A__ )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Both given: attach tokenized ids as training labels.
            A__ : Optional[int] = encodings["""input_ids"""]
            return inputs

    def __A ( self , *A__ , **A__ ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*A__ , **A__ )

    def __A ( self , *A__ , **A__ ):
        """Pad audio features (feature extractor) and/or labels (tokenizer)."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*A__ , **A__ )
        A__ : List[str] = kwargs.pop("""input_features""" , A__ )
        A__ : Union[str, Any] = kwargs.pop("""labels""" , A__ )
        # First positional argument (if any) is treated as the features input.
        if len(A__ ) > 0:
            A__ : int = args[0]
            A__ : Any = args[1:]
        if input_features is not None:
            A__ : Optional[int] = self.feature_extractor.pad(A__ , *A__ , **A__ )
        if labels is not None:
            A__ : Optional[Any] = self.tokenizer.pad(A__ , **A__ )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            # Both given: merge padded label ids into the feature batch.
            A__ : Dict = labels["""input_ids"""]
            return input_features

    def __A ( self , *A__ , **A__ ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*A__ , **A__ )

    @contextmanager
    def __A ( self ):
        """Deprecated: temporarily route ``__call__`` to the tokenizer for
        target (label) processing, restoring the feature extractor on exit."""
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        A__ : Optional[Any] = True
        A__ : Union[str, Any] = self.tokenizer
        yield
        A__ : Optional[Any] = self.feature_extractor
        A__ : Optional[Any] = False
| 64 |
def UpperCamelCase (lowercase_: int ) -> int:
    """Return the 1-indexed position of the most significant set bit of
    ``lowercase_`` (equivalently its bit length); 0 for an input of 0.

    Fixes in this revision: the body previously read undefined names
    (``number``/``position``) and type-checked with ``isinstance(x, x)``,
    which is always True; negative inputs would loop forever under Python's
    arithmetic right shift, so they are rejected explicitly.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is negative.
    """
    if not isinstance(lowercase_ , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    if lowercase_ < 0:
        raise ValueError("""Input value must be a non-negative 'int'""" )
    position = 0
    number = lowercase_
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 64 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import manifest for the DeBERTa subpackage: config and slow tokenizer
# are always importable; fast tokenizer / PyTorch / TF exports are appended
# only when their backend is installed.
# NOTE(review): every manifest entry below is bound to ``A_`` (names look
# machine-obfuscated) while the final _LazyModule call reads
# ``_import_structure`` — verify against the upstream __init__.py.
A_ : Optional[int] = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}
# Fast tokenizer only when the `tokenizers` package is available.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Any = ['DebertaTokenizerFast']
# PyTorch model exports only when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Optional[int] = [
        'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DebertaForMaskedLM',
        'DebertaForQuestionAnswering',
        'DebertaForSequenceClassification',
        'DebertaForTokenClassification',
        'DebertaModel',
        'DebertaPreTrainedModel',
    ]
# TensorFlow model exports only when TF is available.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Union[str, Any] = [
        'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDebertaForMaskedLM',
        'TFDebertaForQuestionAnswering',
        'TFDebertaForSequenceClassification',
        'TFDebertaForTokenClassification',
        'TFDebertaModel',
        'TFDebertaPreTrainedModel',
    ]
# Static imports for type checkers only; at runtime the module is replaced
# by a _LazyModule proxy so heavy backends load on first attribute access.
if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )
else:
    import sys

    A_ : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase (image: np.ndarray , src_points: np.ndarray , dst_points: np.ndarray , rows: int , cols: int ) -> np.ndarray:
    """Warp ``image`` with the affine map taking ``src_points`` to ``dst_points``.

    Fixes in this revision: the original declared five parameters all named
    ``lowercase_`` (a SyntaxError), and passed the destination size as
    ``(rows, cols)`` — ``cva.warpAffine`` expects ``dsize`` as
    ``(width, height)``, i.e. ``(cols, rows)``. The two orders coincide for
    the square Lena image used by the demo below.

    Args:
        image: input (grayscale) image.
        src_points / dst_points: three point pairs defining the affine map.
        rows / cols: height and width of the output image.

    Returns:
        The warped image as an array of shape (rows, cols).
    """
    matrix = cva.getAffineTransform(src_points , dst_points )
    return cva.warpAffine(image , matrix , (cols, rows) )
if __name__ == "__main__":
    # Demo: load Lena, build four affine-rotated variants, show a 2x2 grid.
    # NOTE(review): every assignment below targets ``A_`` while later lines
    # read the original names (``gray_img``, ``ptsa``, ...) — names look
    # machine-obfuscated; verify against the upstream script.
    # read original image
    A_ : List[Any] = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    A_ : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    A_ , A_ : Optional[Any] = gray_img.shape
    # set different points to rotate image
    A_ : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
    A_ : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
    A_ : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
    A_ : Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
    # add all rotated images in a list
    A_ : Dict = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    A_ : Union[str, Any] = plt.figure(1)
    A_ : Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 64 | 1 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Official OpenAI Whisper checkpoint URLs keyed by model size. The SHA-256 of
# each file is embedded as the second-to-last URL path component and verified
# after download.
A_ : Dict = {
    'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
    'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
    'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
    'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
    'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
    'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
    'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
    'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
    'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
    'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def UpperCamelCase (lowercase_: Optional[Any] ) -> None:
    """Drop the top-level bookkeeping entries from an OpenAI Whisper state
    dict in place; missing keys are ignored.

    Fixes in this revision: the body previously read undefined names
    (``ignore_keys``/``state_dict``) and passed the dict itself as ``pop``'s
    default value instead of ``None``.

    Args:
        lowercase_: mutable mapping (the checkpoint state dict).
    """
    for k in ("""layers""", """blocks"""):
        lowercase_.pop(k , None )
# Substring rewrite rules taking OpenAI Whisper state-dict key fragments to
# the HF Transformers naming scheme; applied by the key-renaming helper below.
A_ : Any = {
    'blocks': 'layers',
    'mlp.0': 'fc1',
    'mlp.2': 'fc2',
    'mlp_ln': 'final_layer_norm',
    '.attn.query': '.self_attn.q_proj',
    '.attn.key': '.self_attn.k_proj',
    '.attn.value': '.self_attn.v_proj',
    '.attn_ln': '.self_attn_layer_norm',
    '.attn.out': '.self_attn.out_proj',
    '.cross_attn.query': '.encoder_attn.q_proj',
    '.cross_attn.key': '.encoder_attn.k_proj',
    '.cross_attn.value': '.encoder_attn.v_proj',
    '.cross_attn_ln': '.encoder_attn_layer_norm',
    '.cross_attn.out': '.encoder_attn.out_proj',
    'decoder.ln.': 'decoder.layer_norm.',
    'encoder.ln.': 'encoder.layer_norm.',
    'token_embedding': 'embed_tokens',
    'encoder.positional_embedding': 'encoder.embed_positions.weight',
    'decoder.positional_embedding': 'decoder.embed_positions.weight',
    'ln_post': 'layer_norm',
}
def UpperCamelCase (lowercase_: Any ) -> Any:
    """Rename OpenAI Whisper state-dict keys to the HF naming scheme in place.

    Every ``WHISPER_MAPPING`` fragment occurring in a key is substituted; the
    old -> new mapping is printed for traceability. Returns the same dict.

    Fixes in this revision: the body previously read undefined names
    (``s_dict``/``keys``/``new_key``) instead of the parameter and locals.
    """
    for key in list(lowercase_.keys() ):
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(f"""{key} -> {new_key}""" )
        lowercase_[new_key] = lowercase_.pop(key )
    return lowercase_
def UpperCamelCase (lowercase_: Tuple ) -> Any:
    """Build a bias-free ``nn.Linear`` whose weight tensor is shared with the
    given embedding, for use as a tied LM projection head.

    Fixes in this revision: the body previously read undefined names
    (``emb``/``lin_layer``) and passed the embedding module as the ``bias``
    argument instead of ``False``.

    Args:
        lowercase_: an ``nn.Embedding`` (any module with a ``.weight``).

    Returns:
        ``nn.Linear`` sharing ``lowercase_.weight.data``.
    """
    vocab_size, emb_size = lowercase_.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Share (not copy) the embedding weights so the projection stays tied.
    lin_layer.weight.data = lowercase_.weight.data
    return lin_layer
def UpperCamelCase (lowercase_: str , root: str ) -> bytes:
    """Download a Whisper checkpoint from URL ``lowercase_`` into ``root`` and
    return its raw bytes, verifying the SHA-256 checksum embedded as the
    second-to-last URL path component. A cached file with a matching checksum
    is reused; a mismatching cache triggers a re-download.

    Fixes in this revision: the original declared two parameters both named
    ``lowercase_`` (a SyntaxError), called the non-existent
    ``hashlib.shaaaa`` instead of ``hashlib.sha256``, and leaked file handles
    by calling ``open(...).read()`` without closing.

    Raises:
        RuntimeError: if the target path is not a regular file, or if the
            downloaded bytes fail the checksum.
    """
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(lowercase_ )
    expected_sha = lowercase_.split("""/""" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        with open(download_target , """rb""" ) as f:
            model_bytes = f.read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha:
            return model_bytes
        warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    with urllib.request.urlopen(lowercase_ ) as source, open(download_target , """wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    with open(download_target , """rb""" ) as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
    return model_bytes
def UpperCamelCase (lowercase_: List[Any] , pytorch_dump_folder_path: Tuple ) -> Optional[Any]:
    """Convert an OpenAI Whisper checkpoint (model-size name or local ``.pt``
    path, ``lowercase_``) into a HF ``WhisperForConditionalGeneration`` saved
    at ``pytorch_dump_folder_path``.

    Fixes in this revision: the original declared duplicate ``lowercase_``
    parameters (a SyntaxError), read undefined locals, and wired
    ``decoder_attention_heads`` to ``dimensions["n_text_state"]`` (the hidden
    size) instead of ``dimensions["n_text_head"]`` (the head count).
    """
    if ".pt" not in lowercase_:
        # A bare model name: fetch from the official URL table.
        original_checkpoint = _download(_MODELS[lowercase_] )
    else:
        original_checkpoint = torch.load(lowercase_ , map_location="""cpu""" )
    dimensions = original_checkpoint["""dims"""]
    state_dict = original_checkpoint["""model_state_dict"""]
    # Keep the output-projection weights before keys are renamed/dropped.
    proj_out_weights = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    # FFN width is read directly off the first decoder MLP weight.
    ffn_dim = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_head"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
    model = WhisperForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    # Only the sinusoidal position tables may legitimately be missing.
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI wrapper: parse the two paths and run the conversion.
    # NOTE(review): the parser is bound to ``A_`` but used as ``parser``/``args``,
    # and the called name ``convert_openai_whisper_to_tfms`` does not match the
    # obfuscated function name above — verify against the upstream script.
    A_ : Any = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    A_ : Tuple = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 64 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
    """Smoke tests for ``PyTorchBenchmark``: each test pushes a tiny model
    through the benchmark harness and only asserts that the timing/memory
    result dictionaries come back populated (no absolute numbers checked).

    NOTE(review): local bindings appear machine-obfuscated (assignments to
    ``A__``) while later lines read the original names (``benchmark``,
    ``results``, ...) — verify against the upstream test file.
    """

    def __A ( self , A__ ):
        """Assert every (batch size, sequence length) cell of a result dict is set."""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                A__ : str = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(A__ )

    def __A ( self ):
        """Inference benchmark on a tiny GPT-2."""
        A__ : Dict = """sshleifer/tiny-gpt2"""
        A__ : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ )
        A__ : List[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        """Inference benchmark with `only_pretrain_model` on a tiny classifier."""
        A__ : Dict = """sgugger/tiny-distilbert-classification"""
        A__ : Dict = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
        A__ : str = PyTorchBenchmark(A__ )
        A__ : List[str] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        """Inference benchmark with TorchScript enabled."""
        A__ : Any = """sshleifer/tiny-gpt2"""
        A__ : List[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Tuple = PyTorchBenchmark(A__ )
        A__ : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
    def __A ( self ):
        """Half-precision inference benchmark (CUDA only)."""
        A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
        A__ : Optional[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : str = PyTorchBenchmark(A__ )
        A__ : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        """Inference benchmark with a config whose `architectures` is None."""
        A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
        A__ : Tuple = AutoConfig.from_pretrained(A__ )
        # set architectures equal to `None`
        A__ : List[Any] = None
        A__ : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        """Training benchmark on a tiny GPT-2."""
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : Optional[int] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Any = PyTorchBenchmark(A__ )
        A__ : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
    def __A ( self ):
        """Half-precision training benchmark (CUDA only)."""
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
        A__ : Dict = PyTorchBenchmark(A__ )
        A__ : Optional[int] = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def __A ( self ):
        """Inference benchmark driven by an explicit config object."""
        A__ : int = """sshleifer/tiny-gpt2"""
        A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
        A__ : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        """Inference benchmark for an encoder-decoder model (tinier BART)."""
        A__ : List[str] = """sshleifer/tinier_bart"""
        A__ : List[str] = AutoConfig.from_pretrained(A__ )
        A__ : List[str] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def __A ( self ):
        """Training benchmark driven by an explicit config object."""
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""
        A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
        A__ : Tuple = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : int = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Tuple = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def __A ( self ):
        """Training benchmark for an encoder-decoder model (tinier BART)."""
        A__ : Dict = """sshleifer/tinier_bart"""
        A__ : int = AutoConfig.from_pretrained(A__ )
        A__ : Union[str, Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
        A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
        A__ : Optional[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def __A ( self ):
        """CSV export: every configured report file must exist after a run."""
        A__ : int = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ : Dict = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
            A__ : Optional[Any] = PyTorchBenchmark(A__ )
            benchmark.run()
            self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )

    def __A ( self ):
        """Line-by-line memory tracing: summaries populated and log written."""
        A__ : Optional[int] = """sshleifer/tiny-gpt2"""

        def _check_summary_is_not_empty(A__ ):
            # Memory summaries expose these four views of the trace.
            self.assertTrue(hasattr(A__ , """sequential""" ) )
            self.assertTrue(hasattr(A__ , """cumulative""" ) )
            self.assertTrue(hasattr(A__ , """current""" ) )
            self.assertTrue(hasattr(A__ , """total""" ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            A__ : Dict = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
            A__ : Dict = PyTorchBenchmark(A__ )
            A__ : str = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            _check_summary_is_not_empty(result.train_summary )
            self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
| 64 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
# Make the shared test helper modules importable before loading them.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
# Fixture locations used by the tests below.
A_ : Dict = get_tests_dir('fixtures')
A_ : List[str] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
A_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class _a (unittest.TestCase ):
    """Tests for ``AutoFeatureExtractor`` resolution: Hub/local loading, error
    messages, `trust_remote_code` handling, and custom-class registration.

    NOTE(review): local bindings appear machine-obfuscated (assignments to
    ``A__``/``A_``) while later lines read the original names — verify
    against the upstream test file.
    """

    def __A ( self ):
        """Per-test setup (placeholder counter)."""
        A__ : List[Any] = 0

    def __A ( self ):
        """Load a feature extractor from a Hub model id."""
        A__ : List[Any] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
        self.assertIsInstance(A__ , A__ )

    def __A ( self ):
        """Load a feature extractor from a local directory of fixtures."""
        A__ : Optional[int] = AutoFeatureExtractor.from_pretrained(A__ )
        self.assertIsInstance(A__ , A__ )

    def __A ( self ):
        """config.json alone must be enough to resolve the extractor class."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            A__ : int = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            A__ : Any = AutoFeatureExtractor.from_pretrained(A__ ).to_dict()
            config_dict.pop("""feature_extractor_type""" )
            A__ : str = WavaVecaFeatureExtractor(**A__ )
            # save in new folder
            model_config.save_pretrained(A__ )
            config.save_pretrained(A__ )
            A__ : Optional[int] = AutoFeatureExtractor.from_pretrained(A__ )
            # make sure private variable is not incorrectly saved
            A__ : List[Any] = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
            self.assertIsInstance(A__ , A__ )

    def __A ( self ):
        """Load directly from a preprocessor config JSON file."""
        A__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(A__ )
        self.assertIsInstance(A__ , A__ )

    def __A ( self ):
        """Unknown repo id must raise with a helpful message."""
        with self.assertRaisesRegex(
            A__ , """bert-base is not a local folder and is not a valid model identifier""" ):
            A__ : int = AutoFeatureExtractor.from_pretrained("""bert-base""" )

    def __A ( self ):
        """Unknown revision must raise with a helpful message."""
        with self.assertRaisesRegex(
            A__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            A__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(A__ , revision="""aaaaaa""" )

    def __A ( self ):
        """Repo without a preprocessor config must raise with a helpful message."""
        with self.assertRaisesRegex(
            A__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            A__ : Optional[int] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )

    def __A ( self ):
        """`trust_remote_code` gating for Hub-hosted extractor code, plus reload."""
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(A__ ):
            A__ : str = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(A__ ):
            A__ : Tuple = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=A__ )
        A__ : Optional[int] = AutoFeatureExtractor.from_pretrained(
            """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=A__ )
        self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(A__ )
            A__ : Any = AutoFeatureExtractor.from_pretrained(A__ , trust_remote_code=A__ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )

    def __A ( self ):
        """Registering a custom config/extractor pair in the auto-API."""
        try:
            AutoConfig.register("""custom""" , A__ )
            AutoFeatureExtractor.register(A__ , A__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(A__ ):
                AutoFeatureExtractor.register(A__ , A__ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            A__ : Dict = CustomFeatureExtractor.from_pretrained(A__ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(A__ )
                A__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(A__ )
                self.assertIsInstance(A__ , A__ )
        finally:
            # Always undo the registration so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def __A ( self ):
        """Locally-registered class vs. Hub code under `trust_remote_code`."""
        class _a (__magic_name__ ):
            """Local subclass used to tell local resolution apart from Hub code."""
            UpperCAmelCase__: Dict = True
        try:
            AutoConfig.register("""custom""" , A__ )
            AutoFeatureExtractor.register(A__ , A__ )
            # If remote code is not set, the default is to use local
            A__ : Tuple = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            A__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=A__ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            A__ : Any = AutoFeatureExtractor.from_pretrained(
                """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=A__ )
            self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
            self.assertTrue(not hasattr(A__ , """is_local""" ) )
        finally:
            # Always undo the registration so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 64 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Optional[int] = abspath(join(dirname(__file__), 'src'))
# Put the checkout's `src` directory near the front of sys.path. The original
# line referenced `git_repo_path`, which is never defined in this file — the
# computed path above (bound to `A_`) is the intended value.
sys.path.insert(1, A_)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase (lowercase_: List[str] ) -> Any:
    """pytest_configure hook body: register the custom markers used by the
    transformers test suite on the given pytest config object.

    Args:
        lowercase_: the pytest ``Config`` object.

    The original body called ``config.addinivalue_line`` where ``config`` is
    undefined — it now uses the function's own parameter.
    """
    custom_markers = (
        """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""",
        """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""",
        """is_pipeline_test: mark test to run only when pipelines are tested""",
        """is_staging_test: mark test to run only in the staging environment""",
        """accelerate_tests: mark test that require accelerate""",
        """tool_tests: mark the tool tests that are run on their specific schedule""",
    )
    for marker in custom_markers:
        lowercase_.addinivalue_line("""markers""" , marker)
def UpperCamelCase (lowercase_: Optional[int] ) -> Optional[Any]:
    """pytest_addoption hook body: forward the option parser to the shared
    transformers helper so every test suite gets the same CLI flags."""
    from transformers.testing_utils import pytest_addoption_shared as _shared_addoption

    _shared_addoption(lowercase_)
def UpperCamelCase (lowercase_: List[str] ) -> Optional[Any]:
    """pytest_terminal_summary hook body: emit the extra transformers test
    reports when the session was started with ``--make-reports=<id>``.

    Args:
        lowercase_: the pytest terminal reporter.

    The original body read the undefined name ``terminalreporter`` and passed
    ``id=lowercase_`` (the reporter itself); it now uses the parameter and
    forwards the ``--make-reports`` option value as the report id.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = lowercase_.config.getoption("""--make-reports""")
    if make_reports:
        pytest_terminal_summary_main(lowercase_ , id=make_reports)
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase__: int ) -> List[str]:
    """pytest_sessionfinish hook body.

    Args:
        lowercase_: the pytest session object.
        lowercase__: the session exit status.

    The original signature declared the same parameter name twice (a
    SyntaxError) and stored the corrected status in a throwaway local; the
    fix writes it back onto the session object.
    """
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if lowercase__ == 5:
        lowercase_.exitstatus = 0
# Doctest custom flag to ignore output.
# Both names below are read by the custom output-checker class that follows
# (L: `if IGNORE_RESULT & optionflags` / `OutputChecker.check_output`); the
# original assigned both values to the same throwaway name `A_`, leaving
# `IGNORE_RESULT` and `OutputChecker` undefined.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')
OutputChecker = doctest.OutputChecker
class _a (__magic_name__ ):
    """Doctest output checker honouring the custom IGNORE_RESULT option flag:
    when the flag is set on an example, any output is accepted."""

    def __A( self , want , got , optionflags ):
        # The original declared all three parameters as `A__` (a SyntaxError);
        # since the body reads `optionflags`, the parameters are restored as
        # (want, got, optionflags), matching doctest's check_output signature.
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
# Install the doctest customizations. The original assigned each object to the
# throwaway name `A_` and referenced the nonexistent `CustomOutputChecker`
# (the checker class above is named `_a`). The `import _pytest` at the top of
# this conftest indicates these were monkey-patches of the doctest/_pytest
# modules, restored here.
doctest.OutputChecker = _a
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 64 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _a (unittest.TestCase ):
    """Nightly integration test for the legacy ONNX Stable Diffusion inpaint
    pipeline on CUDA.

    NOTE(review): all three members below share the name `__A`, so only the
    last definition survives, while the pipeline call reads
    `self.gpu_provider` / `self.gpu_options`, which are never defined — this
    looks like mechanical renaming damage; confirm against the upstream
    diffusers test before relying on it.
    """

    @property
    def __A( self ):
        # ONNX Runtime execution-provider spec: CUDA EP with a memory cap.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def __A( self ):
        # ONNX Runtime session options.
        # NOTE(review): `False` is assigned to a throwaway local and `options`
        # is never bound — presumably `options.enable_mem_pattern = False`
        # upstream; confirm.
        A__ : Optional[int] = ort.SessionOptions()
        A__ : Union[str, Any] = False
        return options

    def __A( self ):
        # End-to-end inpainting run compared against a reference image.
        A__ : Dict = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        A__ : Tuple = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        A__ : Optional[int] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
        # using the PNDM scheduler by default
        A__ : Dict = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=A__ , feature_extractor=A__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=A__ )
        A__ : Dict = """A red cat sitting on a park bench"""
        A__ : List[Any] = np.random.RandomState(0 )
        A__ : Union[str, Any] = pipe(
            prompt=A__ , image=A__ , mask_image=A__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=15 , generator=A__ , output_type="""np""" , )
        A__ : List[str] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
| 64 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    """Builds a tiny Pegasus configuration plus matching model inputs for the
    TF Pegasus tests.

    NOTE(review): reconstructed from a mechanically renamed copy — the
    original declared every ``__init__`` parameter as ``A__`` (duplicate
    names, a SyntaxError) and named both methods ``__A``. Names are restored
    from the in-body attribute reads (``config_cls``, ``config_updates``) and
    the call sites in the test class below (``TFPegasusModelTester``,
    ``prepare_config_and_inputs_for_common``,
    ``check_decoder_model_past_large_inputs``).
    """

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        # Tiny-model hyper-parameters; the defaults keep the tests fast.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common( self ):
        """Return a tiny (config, inputs_dict) pair for the common tests."""
        # Encoder ids with a guaranteed EOS token in the last position.
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict

    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        """Check that decoding with and without the past-key-values cache
        produces the same logits."""
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict["""input_ids"""]

        # Restrict to a single example to keep the check cheap.
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        head_mask = inputs_dict["""head_mask"""]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        # `tf.inta` in the renamed copy — restored to tf.int8.
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        output_from_no_past = model(next_input_ids , attention_mask=attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=attention_mask , past_key_values=past_key_values )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )


# Backward-compatible alias for the previous (mechanically renamed) class name.
_a = TFPegasusModelTester
def prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """Build the full dict of Pegasus model inputs, deriving any mask that was
    not supplied.

    The original declared every parameter as ``lowercase_`` (duplicate names,
    a SyntaxError) and assigned each derived mask to a throwaway local, so the
    returned dict carried the ``None`` defaults; parameter names are restored
    from the body reads and the returned dict keys, and the function name from
    its call site in the model tester.
    """
    if attention_mask is None:
        # Attend to every non-padding token (`tf.inta` restored to tf.int8).
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # Always attend to the first (decoder-start) token, then to every
        # non-padding token after it.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# Backward-compatible alias for the previous (mechanically renamed) name.
UpperCamelCase = prepare_pegasus_inputs_dict
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
    """Common + pipeline test harness for the TF Pegasus models.

    NOTE(review): the three methods below all share the name `__A` (only the
    last definition survives), `setUp`'s results are assigned to throwaway
    `A__` locals while later methods read `self.model_tester` /
    `self.config_tester`, and `TFPegasusModelTester` is referenced although
    the tester class above is named `_a` — mechanical renaming damage;
    confirm against the upstream test file.
    """

    # Model classes / pipeline mapping exercised by the shared test mixins.
    UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    UpperCAmelCase__: Tuple = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__: int = True
    UpperCAmelCase__: Union[str, Any] = False
    UpperCAmelCase__: List[str] = False

    def __A( self ):
        # Appears to correspond to setUp: build the shared testers.
        A__ : Optional[Any] = TFPegasusModelTester(self )
        A__ : Tuple = ConfigTester(self , config_class=A__ )

    def __A( self ):
        # Appears to correspond to test_config.
        self.config_tester.run_common_tests()

    def __A( self ):
        # Appears to correspond to test_decoder_model_past_large_inputs.
        A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
    """Slow integration test: generate XSum-style summaries with
    google/pegasus-xsum and compare against reference outputs.

    NOTE(review): the generation methods below share the name `__A`, and
    `generated_words` is read although the generation result is assigned to a
    throwaway `A__` local — mechanical renaming damage; confirm upstream.
    """

    # Long source articles fed to the summarizer.
    UpperCAmelCase__: Optional[int] = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    # Expected generated summaries (one per source article).
    UpperCAmelCase__: Any = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ] # differs slightly from pytorch, likely due to numerical differences in linear layers
    UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''

    @cached_property
    def __A( self ):
        # Tokenizer for the checkpoint under test.
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def __A( self ):
        # The seq2seq model under test.
        A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def __A( self , **A__ ):
        # Generate and compare against the expected summaries.
        A__ : str = self.translate_src_text(**A__ )
        assert self.expected_text == generated_words

    def __A( self , **A__ ):
        # Tokenize, generate with beam search, and decode back to text.
        A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
        A__ : Optional[int] = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
        A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
        return generated_words

    @slow
    def __A( self ):
        self._assert_generated_batch_equal_expected()
| 64 | 1 |
from __future__ import annotations
def all_construct(target: str , word_bank: list[str] | None = None ) -> list[list[str]]:
    """Return every ordered combination of words from ``word_bank`` whose
    concatenation equals ``target``.

    Args:
        target: the string to build.
        word_bank: candidate words (words may be reused); defaults to empty.

    Returns:
        A list of word lists; ``[[]]`` when ``target`` is empty (one way —
        use no words), ``[]`` when the target cannot be built.

    The original iterated ``range(<str>)`` (a TypeError) because the table
    size was assigned to a throwaway local, and declared both parameters
    under the same name; the name is restored from the calls below.
    """
    word_bank = word_bank or []
    # table[i] holds every way to build target[:i]; index len(target) is the answer.
    table_size = len(target) + 1
    table: list[list[list[str]]] = [[] for _ in range(table_size)]

    # seed value: the empty prefix has exactly one construction — no words.
    table[0] = [[]]

    for i in range(table_size):
        if table[i] != []:
            for word in word_bank:
                # `word` extends every construction of target[:i] when it
                # matches the target text starting at position i.
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # push the extended combinations to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]


# Backward-compatible alias for the previous (mechanically renamed) name.
UpperCamelCase = all_construct
if __name__ == "__main__":
    # Demo runs: each prints the list of word combinations that build the target.
    print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
    print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
    print(
        all_construct(
            'hexagonosaurus',
            ['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
        )
    )
| 64 |
class _a :
'''simple docstring'''
def __init__( self ):
A__ : str = """"""
A__ : Any = """"""
A__ : List[Any] = []
def __A ( self , A__ , A__ ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
A__ : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
A__ : Union[str, Any] = self.__min_dist_top_down_dp(A__ , n - 1 )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , A__ )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
A__ : List[Any] = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
def __A ( self , A__ , A__ ):
A__ : Tuple = worda
A__ : Dict = worda
A__ : Optional[Any] = [[-1 for _ in range(len(A__ ) )] for _ in range(len(A__ ) )]
return self.__min_dist_top_down_dp(len(A__ ) - 1 , len(A__ ) - 1 )
def __A ( self , A__ , A__ ):
A__ : Optional[Any] = worda
A__ : Dict = worda
A__ : Union[str, Any] = len(A__ )
A__ : List[str] = len(A__ )
A__ : int = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
A__ : Tuple = j
elif j == 0: # second string is empty
A__ : Dict = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
A__ : str = self.dp[i - 1][j - 1]
else:
A__ : Union[str, Any] = self.dp[i][j - 1]
A__ : str = self.dp[i - 1][j]
A__ : Union[str, Any] = self.dp[i - 1][j - 1]
A__ : Tuple = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
if __name__ == "__main__":
    # Interactive demo: read two strings, print their edit distance computed
    # by both implementations. The original assigned every value to the
    # throwaway name `A_` while the f-strings read `solver` / `Sa`; the
    # actual locals are restored here.
    solver = EditDistance()

    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()

    Sa = input('Enter the first string: ').strip()
    Sa_a = input('Enter the second string: ').strip()

    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa_a)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa_a)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
A_ : Union[str, Any] = logging.get_logger(__name__)

# Checkpoint name -> hosted config.json URL (the pretrained-config archive
# map). NOTE(review): both values above/below are bound to the same throwaway
# name `A_`, so the logger handle is immediately clobbered.
A_ : Optional[Any] = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class _a (__magic_name__ ):
    """Configuration class for XLM-RoBERTa models (model type
    ``xlm-roberta``).

    The original ``__init__`` declared every parameter as ``A__`` (duplicate
    names, a SyntaxError) and stored each value in a throwaway local;
    parameter names are restored from the attribute reads in the body, in
    signature order. The class attribute held the string ``'xlm-roberta'``,
    which is the config's ``model_type`` value.
    """

    model_type = '''xlm-roberta'''

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        # Special-token ids are consumed by the base configuration class.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _a (__magic_name__ ):
    """ONNX export configuration: declares the model inputs and which of
    their axes are dynamic."""

    @property
    def __A( self ):
        # Multiple-choice inputs carry an extra `choice` axis between the
        # batch and sequence axes. The original assigned the axis map to a
        # throwaway local while reading `dynamic_axis` two lines later.
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 64 |
def add(first: int , second: int ) -> int:
    """Return ``first + second`` for non-negative integers using only bitwise
    operations.

    Classic carry loop: XOR adds without carry; AND + left shift computes the
    carry to feed back in. The original declared both parameters under the
    same name (a SyntaxError), read the undefined name ``c`` and never fed
    the carry back into ``second``; the function name is restored from the
    ``__main__`` block below.

    >>> add(3, 5)
    8
    """
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first


# Backward-compatible alias for the previous (mechanically renamed) name.
UpperCamelCase = add
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive demo. The original assigned both inputs to the throwaway
    # name `A_` while the f-string read `first` / `second`; the names are
    # restored to match the prompts.
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f'''{add(first, second) = }''')
| 64 | 1 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _a :
    """Builds tiny Nezha configurations and inputs for the model tests below.

    NOTE(review): this appears to be a mechanically renamed copy of the
    upstream NezhaModelTester — most methods share the name `__A` (so later
    definitions clobber earlier ones), several results are assigned to the
    throwaway name `A__` but read back under their real names, `__init__`
    declares every parameter as `A__` (duplicate names, a SyntaxError), and
    some tuple targets carry annotations that are not valid Python. Confirm
    against the upstream test file before relying on any of it.
    """

    def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=5 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=128 , A__=32 , A__=16 , A__=2 , A__=0.0_2 , A__=3 , A__=4 , A__=None , ):
        # Tiny-model hyper-parameters (reads below suggest the intended names).
        A__ : List[Any] = parent
        A__ : int = batch_size
        A__ : Union[str, Any] = seq_length
        A__ : List[str] = is_training
        A__ : Optional[Any] = use_input_mask
        A__ : str = use_token_type_ids
        A__ : Union[str, Any] = use_labels
        A__ : Optional[int] = vocab_size
        A__ : Optional[int] = hidden_size
        A__ : Any = num_hidden_layers
        A__ : Optional[Any] = num_attention_heads
        A__ : List[str] = intermediate_size
        A__ : Any = hidden_act
        A__ : Dict = hidden_dropout_prob
        A__ : str = attention_probs_dropout_prob
        A__ : Any = max_position_embeddings
        A__ : List[str] = type_vocab_size
        A__ : Dict = type_sequence_label_size
        A__ : Optional[int] = initializer_range
        A__ : str = num_labels
        A__ : Any = num_choices
        A__ : Union[str, Any] = scope

    def __A( self ):
        # Appears to correspond to prepare_config_and_inputs: random ids,
        # optional masks/labels, plus a tiny config.
        A__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : Optional[int] = None
        if self.use_input_mask:
            A__ : int = random_attention_mask([self.batch_size, self.seq_length] )
        A__ : str = None
        if self.use_token_type_ids:
            A__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        A__ : Tuple = None
        A__ : int = None
        A__ : str = None
        if self.use_labels:
            A__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            A__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        A__ : Dict = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __A( self ):
        # Appears to correspond to get_config.
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A__ , initializer_range=self.initializer_range , )

    def __A( self ):
        # Appears to correspond to prepare_config_and_inputs_for_decoder:
        # adds encoder hidden states and an encoder attention mask.
        # NOTE(review): the annotated tuple target below is not valid Python
        # and every element unpacks into the same name `A__`.
        ((A__), (A__), (A__), (A__), (A__), (A__), (A__),) : List[Any] = self.prepare_config_and_inputs()
        A__ : Union[str, Any] = True
        A__ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        A__ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def __A( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
        # Appears to correspond to create_and_check_model.
        A__ : List[Any] = NezhaModel(config=A__ )
        model.to(A__ )
        model.eval()
        A__ : Union[str, Any] = model(A__ , attention_mask=A__ , token_type_ids=A__ )
        A__ : str = model(A__ , token_type_ids=A__ )
        A__ : Optional[int] = model(A__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def __A( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ):
        # Appears to correspond to create_and_check_model_as_decoder.
        A__ : List[str] = True
        A__ : List[Any] = NezhaModel(A__ )
        model.to(A__ )
        model.eval()
        A__ : Tuple = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , )
        A__ : Optional[Any] = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , encoder_hidden_states=A__ , )
        A__ : Dict = model(A__ , attention_mask=A__ , token_type_ids=A__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def __A( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
        # Appears to correspond to create_and_check_for_masked_lm.
        A__ : Any = NezhaForMaskedLM(config=A__ )
        model.to(A__ )
        model.eval()
        A__ : int = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __A( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
        # Appears to correspond to create_and_check_for_next_sequence_prediction.
        A__ : Optional[Any] = NezhaForNextSentencePrediction(config=A__ )
        model.to(A__ )
        model.eval()
        A__ : int = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def __A( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
        # Appears to correspond to create_and_check_for_pretraining.
        A__ : str = NezhaForPreTraining(config=A__ )
        model.to(A__ )
        model.eval()
        A__ : Union[str, Any] = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , next_sentence_label=A__ , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def __A( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
        # Appears to correspond to create_and_check_for_question_answering.
        A__ : str = NezhaForQuestionAnswering(config=A__ )
        model.to(A__ )
        model.eval()
        A__ : str = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , start_positions=A__ , end_positions=A__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __A( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
        # Appears to correspond to create_and_check_for_sequence_classification.
        A__ : Optional[int] = self.num_labels
        A__ : Optional[int] = NezhaForSequenceClassification(A__ )
        model.to(A__ )
        model.eval()
        A__ : List[Any] = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __A( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
        # Appears to correspond to create_and_check_for_token_classification.
        A__ : Union[str, Any] = self.num_labels
        A__ : List[Any] = NezhaForTokenClassification(config=A__ )
        model.to(A__ )
        model.eval()
        A__ : List[str] = model(A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __A( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ):
        # Appears to correspond to create_and_check_for_multiple_choice:
        # inputs are tiled across the choice dimension.
        A__ : int = self.num_choices
        A__ : Optional[int] = NezhaForMultipleChoice(config=A__ )
        model.to(A__ )
        model.eval()
        A__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        A__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        A__ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        A__ : int = model(
            A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __A( self ):
        # Appears to correspond to prepare_config_and_inputs_for_common:
        # repack the prepared inputs into a model-inputs dict.
        A__ : Any = self.prepare_config_and_inputs()
        ((A__), (A__), (A__), (A__), (A__), (A__), (A__),) : List[str] = config_and_inputs
        A__ : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class _a (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
    """Common + pipeline test harness for the Nezha models.

    NOTE(review): mechanically renamed copy — most methods share the name
    `__A` (only the last definition survives), `NezhaModelTester` is
    referenced although the tester class above is named `_a`, and two tuple
    targets below carry annotations that are not valid Python. Confirm
    against the upstream test file.
    """

    # Model classes and pipeline mapping exercised by the shared mixins.
    UpperCAmelCase__: Any = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    UpperCAmelCase__: List[Any] = (
        {
            '''feature-extraction''': NezhaModel,
            '''fill-mask''': NezhaForMaskedLM,
            '''question-answering''': NezhaForQuestionAnswering,
            '''text-classification''': NezhaForSequenceClassification,
            '''token-classification''': NezhaForTokenClassification,
            '''zero-shot''': NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase__: List[Any] = True

    def __A( self , A__ , A__ , A__=False ):
        # Appears to correspond to _prepare_for_class: add zero labels for
        # the pretraining head when requested.
        # NOTE(review): three parameters share the name `A__` (a SyntaxError
        # in the renamed copy).
        A__ : Tuple = super()._prepare_for_class(A__ , A__ , return_labels=A__ )
        if return_labels:
            if model_class in get_values(A__ ):
                A__ : List[Any] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=A__ )
                A__ : Tuple = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=A__ )
        return inputs_dict

    def __A( self ):
        # setUp: build the shared model tester and config tester.
        A__ : Optional[int] = NezhaModelTester(self )
        A__ : List[Any] = ConfigTester(self , config_class=A__ , hidden_size=37 )

    def __A( self ):
        self.config_tester.run_common_tests()

    def __A( self ):
        A__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )

    def __A( self ):
        A__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*A__ )

    def __A( self ):
        # This regression test was failing with PyTorch < 1.3
        # NOTE(review): annotated tuple target below is not valid Python and
        # every element unpacks into the same name `A__`.
        ((A__), (A__), (A__), (A__), (A__), (A__), (A__), (A__), (A__),) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
        A__ : Optional[Any] = None
        self.model_tester.create_and_check_model_as_decoder(
            A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , )

    def __A( self ):
        A__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*A__ )

    def __A( self ):
        A__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*A__ )

    def __A( self ):
        A__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*A__ )

    def __A( self ):
        A__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*A__ )

    def __A( self ):
        A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A__ )

    def __A( self ):
        A__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A__ )

    def __A( self ):
        A__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A__ )

    @slow
    def __A( self ):
        # Smoke-test loading the first published checkpoint.
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Union[str, Any] = NezhaModel.from_pretrained(A__ )
            self.assertIsNotNone(A__ )

    @slow
    @require_torch_gpu
    def __A( self ):
        # TorchScript trace/save/load round-trip with a device change.
        # NOTE(review): the annotated tuple target below is not valid Python.
        A__ , A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            A__ : Optional[Any] = True
            A__ : Dict = model_class(config=A__ )
            A__ : Optional[Any] = self._prepare_for_class(A__ , A__ )
            A__ : List[Any] = torch.jit.trace(
                A__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(A__ , os.path.join(A__ , """bert.pt""" ) )
                A__ : Optional[Any] = torch.jit.load(os.path.join(A__ , """bert.pt""" ) , map_location=A__ )
                loaded(inputs_dict["""input_ids"""].to(A__ ) , inputs_dict["""attention_mask"""].to(A__ ) )
@require_torch
class _a (unittest.TestCase ):
    """Slow integration tests: run sijunhe/nezha-cn-base and compare output
    slices against reference tensors."""

    @slow
    def __A( self ):
        # Base-model forward pass: check output shape and a 3x3 logits slice.
        A__ : Optional[int] = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
        A__ : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        A__ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            A__ : Any = model(A__ , attention_mask=A__ )[0]
        A__ : str = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , A__ )
        A__ : Optional[Any] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A__ , atol=1e-4 ) )

    @slow
    def __A( self ):
        # Masked-LM head forward pass: check vocab-sized logits and a slice.
        A__ : List[Any] = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
        A__ : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        A__ : Tuple = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            A__ : List[Any] = model(A__ , attention_mask=A__ )[0]
        A__ : Union[str, Any] = torch.Size((1, 6, 2_1128) )
        self.assertEqual(output.shape , A__ )
        A__ : Optional[int] = torch.tensor(
            [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , A__ , atol=1e-4 ) )
| 64 |
from __future__ import annotations
from collections.abc import Callable
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase (lowercase_: Matrix , vector: Matrix ) -> Matrix:
    """Solve the linear system ``lowercase_ @ x = vector`` by Gaussian
    elimination with partial pivoting.

    Fix: the mangled source assigned every intermediate to ``A__`` while the
    algorithm read ``augmented``/``pivot_row`` etc., so it raised NameError;
    the working elimination is restored below.

    :param lowercase_: square coefficient matrix (size x size)
    :param vector: column vector of constants (size x 1)
    :return: column vector solution, each entry rounded to 10 decimals
    """
    size = len(lowercase_)
    # Build the augmented matrix [A | b].
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = lowercase_[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: pick the row with the largest |entry| in this column.
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        # Eliminate the column below the pivot.
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1

    # Back substitution.
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


# Backwards-compatible alias: sibling code in this module calls `solve`.
solve = UpperCamelCase
def UpperCamelCase (lowercase_: list[int] ) -> Callable[[int], int]:
    """Return the polynomial interpolating the points (1, y1), ..., (n, yn).

    Builds the Vandermonde system and solves it with ``solve``.

    Fix: the mangled source assigned the matrix/vector entries to a throwaway
    ``A__`` local, so nothing was ever filled in; the construction is restored.

    :param lowercase_: the y-values of the points to interpolate
    :return: a callable evaluating the interpolating polynomial at an int
    """
    size = len(lowercase_)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    for x_val, y_val in enumerate(lowercase_):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        # Horner-free evaluation with rounded integer coefficients.
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


# Backwards-compatible alias: sibling code in this module calls `interpolate`.
interpolate = UpperCamelCase
def UpperCamelCase (lowercase_: int ) -> int:
    """Evaluate u(n) = 1 - n + n^2 - n^3 + ... + n^10 (Project Euler 101).

    Fix: the original body referenced an undefined name ``variable``; the
    alternating sum is computed directly from the parameter as
    sum_{p=0}^{10} (-n)^p, which is term-for-term identical.
    """
    return sum((-lowercase_) ** power for power in range(11))


# Backwards-compatible alias: sibling code in this module calls `question_function`.
question_function = UpperCamelCase
def UpperCamelCase (func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    """Sum the first incorrect term (FIT) of each optimum polynomial of
    ``func`` for degrees 1..order (Project Euler 101).

    Fixes: the mangled source declared two parameters with the same name
    (a SyntaxError) and read locals (``x_val``, ``ret``) it never assigned;
    both are restored here. Defaults are unchanged in value.
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        # Walk forward until the fit diverges from the true sequence (the FIT).
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


# Backwards-compatible alias: the __main__ guard below calls `solution`.
solution = UpperCamelCase
if __name__ == "__main__":
    # Print the Project Euler answer using debug-style f-string formatting.
    # NOTE(review): `solution` is presumably the summation routine defined above — verify the name resolves.
    print(f'''{solution() = }''')
| 64 | 1 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
A_ : Dict = logging.get_logger(__name__)
class _a (__magic_name__ ):
    """SpeechT5-style speech feature extractor.

    Converts raw mono waveforms into padded float32 ``input_values`` for the
    encoder, or log-mel spectrogram targets for the decoder (``is_target``).

    Fixes vs. the mangled source: every ``__init__``/method signature declared
    duplicate parameter names (a SyntaxError); constructor arguments were
    assigned to a throwaway local instead of ``self``; and the helper methods
    are renamed to the names this class itself calls
    (``_process_audio``, ``_extract_mel_features``, ``zero_mean_unit_var_norm``,
    ``to_dict``).
    NOTE(review): `__magic_name__` is presumably ``SequenceFeatureExtractor`` — confirm.
    """

    # Originally `model_input_names`: the keys emitted by __call__.
    UpperCAmelCase__: Any = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size=1,
        sampling_rate=16000,
        padding_value=0.0,
        do_normalize=False,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        frame_signal_scale=1.0,
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        reduction_factor=2,
        return_attention_mask=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        # Derived STFT geometry (milliseconds -> samples).
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Normalize each vector to zero mean / unit variance over its valid
        (unpadded) length; padded tail is overwritten with ``padding_value``."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def _extract_mel_features(self, one_waveform):
        """Return the log10 mel spectrogram of a single waveform, shaped (frames, num_mel_bins)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        """Featurize ``audio`` (encoder inputs) and/or ``audio_target``
        (decoder spectrogram labels); targets land under ``labels`` /
        ``decoder_attention_mask`` when both are given."""
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs
            )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ):
        """Coerce raw speech to float32 arrays, optionally extract mel targets,
        pad, normalize and convert to the requested tensor framework."""
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(speech, dtype=np.float32) for speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        """Serialize the config, dropping derived attributes."""
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
| 64 |
from functools import lru_cache
@lru_cache
def UpperCamelCase (lowercase_: int ) -> int:
    """Return ``lowercase_!`` (memoized recursive factorial).

    Fix: the recursive call targeted an undefined name ``factorial``; it now
    recurses on this function itself.

    :raises ValueError: if ``lowercase_`` is negative
    """
    if lowercase_ < 0:
        raise ValueError("Number should not be negative.")
    return 1 if lowercase_ in (0, 1) else lowercase_ * UpperCamelCase(lowercase_ - 1)


# Backwards-compatible alias under the conventional name.
factorial = UpperCamelCase
if __name__ == "__main__":
    # Run the embedded doctest examples when this module is executed directly.
    import doctest

    doctest.testmod()
| 64 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
# Emit conversion progress at INFO level; module-scoped logger per transformers convention.
logging.set_verbosity_info()
A_ : int = logging.get_logger(__name__)
def UpperCamelCase (lowercase_ ):
    """Load a metaseq/fairseq OPT checkpoint and normalize its state dict.

    Drops fairseq-only weights, renames projection/layer-norm keys to the HF
    layout, and splits each fused ``qkv_proj`` weight into separate
    ``q_proj``/``k_proj``/``v_proj`` tensors.

    Fixes: the mangled source assigned every intermediate to ``A__`` but read
    the real names (NameError), and it re-read the checkpoint from disk a
    second time just to take the ``"model"`` entry — the already-loaded dict
    is reused instead.

    :param lowercase_: path to the checkpoint file
    :return: the cleaned, flat state dict
    """
    sd = torch.load(lowercase_, map_location="cpu")
    if "model" in sd:
        # Reuse the dict we already loaded instead of a second torch.load().
        sd = sd["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


# Backwards-compatible alias: the converter below calls `load_checkpoint`.
load_checkpoint = UpperCamelCase
@torch.no_grad()
def UpperCamelCase (checkpoint_path , pytorch_dump_folder_path , config=None ):
    """Convert an OPT checkpoint to a half-precision HF ``OPTModel`` and save it.

    Fix: the mangled source declared three parameters with the same name
    (a SyntaxError) and read locals it never assigned; both are restored.

    :param checkpoint_path: fairseq/metaseq checkpoint to convert
    :param pytorch_dump_folder_path: output directory (created if missing)
    :param config: optional path/name of an ``OPTConfig`` to load
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


# Backwards-compatible alias: the __main__ guard below calls `convert_opt_checkpoint`.
convert_opt_checkpoint = UpperCamelCase
if __name__ == "__main__":
    # CLI entry point: convert a metaseq/fairseq OPT checkpoint into HF format.
    A_ : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    # NOTE(review): `A_` above is presumably `parser`/`args` — verify the names resolve.
    A_ : Dict = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 64 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder over flat ``{"content": str}`` examples (test-only).

    Fix: two methods declared duplicate parameter names (``A__, A__`` — a
    SyntaxError); the parameters are restored per the builder hook contract.
    """

    def __A ( self ):
        # _info hook: declare the flat string feature schema.
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def __A ( self , dl_manager , pipeline ):
        # _split_generators hook: a single TRAIN split fed by the dummy examples.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def __A ( self , pipeline , examples ):
        # _build_pcollection hook: materialize the examples into the Beam pipeline.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


# Backwards-compatible alias: the test class below refers to `DummyBeamDataset`.
DummyBeamDataset = _a
class _a (datasets.BeamBasedBuilder ):
    """Beam-based builder over nested ``{"a": {"b": [str]}}`` examples (test-only).

    Fix: two methods declared duplicate parameter names (``A__, A__`` — a
    SyntaxError); the parameters are restored per the builder hook contract.
    """

    def __A ( self ):
        # _info hook: declare the nested sequence feature schema.
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def __A ( self , dl_manager , pipeline ):
        # _split_generators hook: a single TRAIN split fed by the nested examples.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def __A ( self , pipeline , examples ):
        # _build_pcollection hook: materialize the examples into the Beam pipeline.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


# Backwards-compatible alias: the test class below refers to `NestedBeamDataset`.
NestedBeamDataset = _a
def UpperCamelCase () -> Dict:
    """Return the flat dummy examples: ``(key, {"content": str})`` pairs."""
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


# Backwards-compatible alias: the builders/tests call `get_test_dummy_examples`.
get_test_dummy_examples = UpperCamelCase
def UpperCamelCase () -> Tuple:
    """Return the nested dummy examples: ``(key, {"a": {"b": [str]}})`` pairs."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


# Backwards-compatible alias: the builders/tests call `get_test_nested_examples`.
get_test_nested_examples = UpperCamelCase
class _a (__magic_name__ ):
    """End-to-end tests for the Beam-based builders above.

    Fixes vs. the mangled source: restored the local names the assertions read
    (``builder``, ``dset``, ``expected_num_examples``, ...), and in the sharded
    test the second shard-existence check asserted ``...-00000-of-00002`` twice
    — it now checks shard ``00001`` as intended.
    """

    @require_beam
    def __A ( self ):
        """download_and_prepare writes one arrow file and the info JSON; the
        resulting split round-trips the dummy examples."""
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def __A ( self ):
        """Forcing two parquet shards yields two arrow shard files whose union
        contains all examples (order is not preserved when sharding)."""
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            # Fixed: the original checked shard 00000 twice; verify the second shard too.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def __A ( self ):
        """Preparing without a beam runner must raise MissingBeamOptions."""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def __A ( self ):
        """Nested features survive the Beam write/read round trip."""
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 64 | 1 |
def UpperCamelCase (list_data: list , length: int = 0 ) -> list:
    """Sort ``list_data`` in place (and return it) with recursive bubble sort.

    Fixes: the mangled source declared two parameters with the same name
    (a SyntaxError), read undefined names (``list_data``/``length``), and
    recursed on an undefined name ``bubble_sort``.

    :param list_data: list of comparable items; mutated in place
    :param length: prefix length still unsorted (0 means the whole list)
    :return: the sorted list
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    # Recurse on a shorter prefix only while a pass still swapped something.
    return list_data if not swapped else UpperCamelCase(list_data, length - 1)
if __name__ == "__main__":
    # Run the embedded doctest examples when this module is executed directly.
    import doctest

    doctest.testmod()
| 64 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (__magic_name__ ):
    """Deprecated alias kept for backward compatibility; forwards everything
    to the image-processor base class while emitting a deprecation warning.

    Fixes: the original ``__init__`` declared ``*A__, **A__`` (duplicate
    argument name — a SyntaxError) and passed an undefined name as the warning
    category; ``FutureWarning`` is the conventional category for deprecations.
    NOTE(review): `__magic_name__` is presumably ``PoolFormerImageProcessor`` — confirm.
    """

    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 64 | 1 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def UpperCamelCase():
    """Session-scoped in-memory `datasets.Dataset` with 10 rows of
    token/label/QA/id columns.

    Fix: the mangled source assigned the row count and features to throwaway
    locals but read ``lowercase_`` (undefined); names restored.
    """
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory, dataset):
    """Cache the ``dataset`` fixture to an on-disk arrow file and return its path.

    Fix: the mangled source declared two parameters with the same name
    (a SyntaxError); restored the fixture names pytest injects by.
    """
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
# Sample text payload shared by the plain-text / compressed-file fixtures below.
A_ : int = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a plain-text file holding FILE_CONTENT.

    Fix: the body referenced ``tmp_path_factory``/``filename``/``data`` which
    the mangled source never defined; names restored.
    """
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a bzip2-compressed copy of FILE_CONTENT.

    Fixes: restored undefined local/parameter names, and the mangled module
    name ``bza`` is replaced by the stdlib ``bz2``.
    """
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a gzip-compressed copy of FILE_CONTENT.

    Fix: restored undefined local/parameter names.
    """
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to an lz4-compressed copy of FILE_CONTENT (only when lz4 is installed).

    Fixes: restored undefined local/parameter names, and the mangled module
    name ``lza.frame`` is replaced by the real ``lz4.frame`` that the
    ``LZ4_AVAILABLE`` guard refers to.
    """
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory, text_file):
    """Path to a 7z archive containing the text file (only when py7zr is installed).

    Fixes: the mangled source declared duplicate parameter names (SyntaxError)
    and imported a nonexistent ``pyazr``; the ``PY7ZR_AVAILABLE`` guard shows
    the intended module is ``py7zr``. NOTE(review): parameter order/names are
    reconstructed — confirm against the sibling ``text_file`` fixture name.
    """
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory, text_file):
    """Path to an uncompressed tar archive containing the text file.

    Fix: the mangled source declared duplicate parameter names (SyntaxError);
    restored the fixture names pytest injects by.
    """
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to an xz (LZMA) compressed copy of FILE_CONTENT.

    Fix: restored undefined local/parameter names.
    """
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory, text_file):
    """Path to a zip archive containing the text file.

    Fix: the mangled source declared duplicate parameter names (SyntaxError);
    restored the fixture names pytest injects by.
    """
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a zstandard-compressed copy of FILE_CONTENT (only when zstandard is installed).

    Fix: restored undefined local/parameter names.
    """
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a small TMX (XML) translation-memory file with 5 ca/en segments.

    Fix: restored undefined local/parameter names; the XML payload is unchanged.
    """
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
  <header segtype=\"sentence\" srclang=\"ca\" />
  <body>
    <tu>
      <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
      <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
    </tu>
    <tu>
      <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
      <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
    </tu>
    <tu>
      <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
      <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
    </tu>
    <tu>
      <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
      <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
    </tu>
    <tu>
      <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
      <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
    </tu>
  </body>
</tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
# Canonical 4-row sample used by most tabular fixtures (csv/json/parquet/sql).
A_ : Optional[Any] = [
    {'col_1': '0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': '1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': '2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
# A second, smaller table for the *2 / multi-file fixtures.
A_ : List[str] = [
    {'col_1': '4', 'col_2': 4, 'col_3': 4.0},
    {'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
# Column-oriented view of the first table.
A_ : Tuple = {
    'col_1': ['0', '1', '2', '3'],
    'col_2': [0, 1, 2, 3],
    'col_3': [0.0, 1.0, 2.0, 3.0],
}
# Rows whose keys appear in a different order (JSON field-order tests).
A_ : List[str] = [
    {'col_3': 0.0, 'col_1': '0', 'col_2': 0},
    {'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
# Rows whose col_1 values are non-numeric strings.
A_ : Optional[int] = [
    {'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
    {'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
    {'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
    {'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session")
def UpperCamelCase() -> Optional[Any]:
    """Session fixture exposing the module-level column-oriented sample dict."""
    payload = DATA_DICT_OF_LISTS
    return payload
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Cache the column-oriented sample to an on-disk arrow file and return its path.

    Fix: restored undefined names; NOTE(review): the single parameter is used
    as the tmp-path factory, so the ``from_dict`` argument is presumably the
    module-level DATA_DICT_OF_LISTS — confirm.
    """
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a SQLite database with DATA loaded into a ``dataset`` table.

    Fix: restored undefined local/parameter names.
    NOTE(review): ``sqlitea`` mirrors the module-level import and is presumably
    the stdlib ``sqlite3`` — confirm the top-of-file import.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlitea.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a CSV file with DATA written under a header row.

    Fix: restored undefined local/parameter names.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a second CSV file (``dataset2.csv``) with the same DATA rows.

    Fix: restored undefined local/parameter names.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(csv_path, tmp_path_factory):
    """Path to a bzip2-compressed copy of the CSV fixture.

    Fixes: duplicate parameter names (SyntaxError) and the mangled module
    name ``bza`` → stdlib ``bz2``.
    """
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(csv_path, csva_path, tmp_path_factory):
    """Path to a zip archive containing both CSV fixtures.

    Fix: duplicate parameter names (SyntaxError); restored fixture names.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csva_path, arcname=os.path.basename(csva_path))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(csv_path, csva_path, tmp_path_factory):
    """Path to a zip archive whose members use an uppercase ``.CSV`` extension.

    Fix: duplicate parameter names (SyntaxError); restored the fixture names
    already referenced in the body (``csv_path``/``csva_path``).
    """
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csva_path, arcname=os.path.basename(csva_path.replace(".csv", ".CSV")))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(csv_path, csva_path, tmp_path_factory):
    """Path to a zip archive with both CSVs nested under ``main_dir/``.

    Fix: duplicate parameter names (SyntaxError); restored fixture names.
    """
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csva_path, arcname=os.path.join("main_dir", os.path.basename(csva_path)))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a parquet file holding DATA with an explicit schema.

    Fixes: restored undefined local/parameter names, and the mangled dtype
    factories ``pa.intaa``/``pa.floataa`` → ``pa.int64``/``pa.float64``
    (matching the int/float columns of DATA).
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a JSON file of shape ``{"data": DATA}``.

    Fix: restored undefined local/parameter names.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a JSON file of shape ``{"data": DATA_DICT_OF_LISTS}``.

    Fix: restored undefined local/parameter names.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a JSON-lines file with one DATA row per line.

    Fix: restored undefined local/parameter names.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a second JSON-lines file (``dataset2.jsonl``) with the DATA rows.

    Fix: restored undefined local/parameter names.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a JSON-lines file whose rows have reordered keys (DATA_312).

    Fix: restored undefined local/parameter names.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Path to a JSON-lines file with string-typed col_1 values (DATA_STR).

    Fix: restored undefined local/parameter names.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory, text_path) -> str:
    """Gzip-compress the plain-text dataset fixture and return the .gz path.

    NOTE(review): second fixture parameter name reconstructed from the body's
    intent — confirm against the original conftest.
    """
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory, jsonl_path) -> str:
    """Gzip-compress the JSON Lines dataset fixture and return the .gz path.

    NOTE(review): second fixture parameter name reconstructed — confirm.
    """
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(jsonl_path, jsonl2_path, tmp_path_factory):
    """Zip both JSON Lines fixtures into a flat archive; returns the zip Path.

    NOTE(review): fixture parameter names reconstructed — confirm.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """Nest an existing jsonl zip under a "nested/" directory in a new archive.

    NOTE(review): fixture parameter names reconstructed; the body writes only
    the first one — confirm against the original conftest.
    """
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(jsonl_path, jsonl2_path, tmp_path_factory):
    """Zip both JSON Lines fixtures under a "main_dir/" prefix; returns the zip Path."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(jsonl_path, jsonl2_path, tmp_path_factory):
    """Tar both JSON Lines fixtures into a flat archive; returns the tar Path."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """Nest an existing jsonl tar under "nested/" inside a new tar archive.

    NOTE(review): fixture parameter names reconstructed — confirm.
    """
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory) -> str:
    """Write a four-line plain-text dataset ("0".."3") and return the file path."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory) -> str:
    """Second four-line plain-text dataset (dataset2.txt); returns the file path."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Four-line file with an unsupported extension (.abc); returns a Path object."""
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"  # deliberately not str()
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(text_path, text2_path, tmp_path_factory):
    """Zip both plain-text fixtures into a flat archive; returns the zip Path."""
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(text_path, text2_path, tmp_path_factory):
    """Zip both plain-text fixtures under a "main_dir/" prefix; returns the zip Path."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(text_path, text2_path, tmp_path_factory):
    """Zip two text files under unsupported .ext member names; returns the zip Path."""
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory) -> str:
    """Write a UTF-8 text file containing a U+2029 (paragraph-separator) line."""
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def UpperCamelCase() -> Union[str, Any]:
    """Repository-relative path of the RGB test image used by image features tests."""
    parts = ("tests", "features", "data", "test_image_rgb.jpg")
    return os.path.join(*parts)
@pytest.fixture(scope="session")
def UpperCamelCase() -> int:
    """Repository-relative path of the 44.1 kHz WAV sample used by audio tests."""
    parts = ("tests", "features", "data", "test_audio_44100.wav")
    return os.path.join(*parts)
@pytest.fixture(scope="session")
def UpperCamelCase(image_file, tmp_path_factory):
    """Zip the test image twice (second copy renamed *2.jpg); returns the zip Path.

    NOTE(review): first fixture parameter name reconstructed — confirm.
    """
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def UpperCamelCase(tmp_path_factory):
    """Build a data directory with visible and hidden files/subdirectories.

    Layout: subdir/{train.txt,test.txt,.test.txt} and .subdir/{train.txt,test.txt},
    used to verify that hidden entries are skipped during data-file resolution.
    """
    data_dir = tmp_path_factory.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
| 64 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Build (train_dataloader, valid_dataloader) over synthetic y = a*x + b + noise.

    NOTE(review): the mangled signature declared five duplicate `lowercase_`
    parameters (a SyntaxError); names/defaults restored from the body and the
    default values (2, 3, 16, 10, 2).
    """

    def get_dataset(n_batches):
        # n_batches full batches of (x, a*x + b + 0.1*noise) pairs
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def UpperCamelCase(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Run a quick MSE training loop and return the per-step random draws.

    The returned list of `random.random()` values lets callers verify that RNG
    state was restored after a checkpoint round-trip.

    NOTE(review): the mangled signature declared six duplicate `lowercase_`
    parameters (a SyntaxError); names restored from the body's references.
    """
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class _a (nn.Module ):
    """Trivial scalar affine model y = a * x + b used by the checkpointing tests.

    The tests compare ``model.a.item()`` / ``model.b.item()`` before and after
    checkpoint round-trips, so both scalars are learnable Parameters.
    """

    def __init__(self):
        super().__init__()
        # was bound to throwaway locals `A__`, leaving the module with no parameters
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        # must be named `forward` so `model(x)` works via nn.Module.__call__
        # (the mangled version named it `__A`, breaking every call site)
        return x * self.a + self.b
class _a (unittest.TestCase ):
    """save_state/load_state round-trip tests for ``accelerate.Accelerator``.

    NOTE(review): automated renaming broke this block — every test method is
    named ``__A`` (later definitions shadow earlier ones), results are bound to
    the throwaway name ``A__`` but then read back through their original names
    (``model``, ``optimizer``, ``accelerator``, ``test_rands`` ...), and
    ``DummyModel`` / ``dummy_dataloaders`` / ``train`` refer to definitions
    above that were renamed to ``_a`` / ``UpperCamelCase``.  Code is left
    verbatim; only comments were added.
    """
    def __A ( self ):
        # With ProjectConfiguration(total_limit=1), two save_state() calls must
        # leave exactly one checkpoint directory on disk.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : str = dummy_dataloaders()
            A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : List[str] = Accelerator(project_config=A__ )
            A__ , A__ , A__ , A__ : Any = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
    def __A ( self ):
        # Round-trip with explicit checkpoint folders: save, train, reload and
        # verify model params / optimizer state / RNG-driven values match.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : str = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : int = dummy_dataloaders()
            # Train baseline
            A__ : str = Accelerator()
            A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            A__ : List[Any] = os.path.join(A__ , """initial""" )
            accelerator.save_state(A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Dict = optimizer.state_dict()
            A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Optional[int] = DummyModel()
            A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Dict = dummy_dataloaders()
            A__ : List[str] = Accelerator()
            A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(A__ )
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : Union[str, Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
            accelerator.save_state(A__ )
            # Load everything back in and make sure all states work
            accelerator.load_state(A__ )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
            A__ : Optional[int] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
    def __A ( self ):
        # Same round-trip but with automatic checkpoint naming under
        # <project_dir>/checkpoints/checkpoint_<i>.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : int = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : List[str] = dummy_dataloaders()
            A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : str = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : int = optimizer.state_dict()
            A__ : int = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Dict = DummyModel()
            A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Union[str, Any] = dummy_dataloaders()
            A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
            A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : Tuple = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : str = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : List[Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
    def __A ( self ):
        # register_for_checkpointing must reject plain tensors (no state_dict);
        # the error message should name offending indices 0 and 1 only.
        A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
        A__ : int = torch.tensor([2, 3, 4] )
        A__ : List[Any] = DummyModel()
        A__ : List[Any] = torch.optim.Adam(net.parameters() )
        A__ : Tuple = Accelerator()
        with self.assertRaises(A__ ) as ve:
            accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
        A__ : Any = str(ve.exception )
        self.assertTrue("""Item at index 0""" in message )
        self.assertTrue("""Item at index 1""" in message )
        self.assertFalse("""Item at index 2""" in message )
        self.assertFalse("""Item at index 3""" in message )
    def __A ( self ):
        # LR-scheduler state must also round-trip through save_state/load_state.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Any = DummyModel()
            A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
            A__ , A__ : List[Any] = dummy_dataloaders()
            A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            A__ : Tuple = scheduler.state_dict()
            train(3 , A__ , A__ , A__ , A__ , A__ )
            self.assertNotEqual(A__ , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            self.assertEqual(A__ , scheduler.state_dict() )
    def __A ( self ):
        # With total_limit=2 only the two most recent of 11 checkpoints survive.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
            # Train baseline
            A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ : Union[str, Any] = accelerator.prepare(A__ )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
    @require_cuda
    def __A ( self ):
        # Re-runs this file under torchrun with one process per visible GPU.
        A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed smoke test: checkpoints model/optimizer state, then verifies
    # optimizer tensors land on CPU vs. accelerator device per `map_location`.
    # NOTE(review): automated renaming bound every value to the throwaway name
    # ``A_`` while later lines read the original names (``savedir``, ``model``,
    # ``optimizer``, ``scheduler``, ``train_dataloader``, ``valid_dataloader``,
    # ``project_config``, ``accelerator``, ``param_device``) — all undefined as
    # written.  Code left verbatim; comments only.
    A_ : List[str] = '/tmp/accelerate/state_checkpointing'
    A_ : Optional[Any] = DummyModel()
    A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    A_ , A_ : List[Any] = dummy_dataloaders()
    A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        # only rank 0 resets the shared checkpoint directory
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    A_ , A_ : Dict = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    A_ : Optional[Any] = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        A_ : Tuple = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 64 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (__magic_name__ , __magic_name__ , __magic_name__ ):
    """GPT-2 based text decoder with a learned prefix projection (CLIP-caption style).

    NOTE(review): automated renaming damaged this block — the base classes were
    replaced by the undefined name ``__magic_name__`` (presumably the
    ModelMixin / ConfigMixin / ModuleUtilsMixin trio imported above — confirm),
    ``__init__`` and several methods declare duplicate ``A__`` parameters (a
    SyntaxError), and values bound to ``A__`` are read back via their original
    names (``prefix_length``, ``outputs``, ``tokens`` ...).  Code left
    verbatim; comments only.
    """
    # regex patterns for attention-bias keys to ignore when loading GPT-2 weights
    UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
    @register_to_config
    def __init__( self , A__ , A__ , A__ = None , A__ = 5_0257 , A__ = 1024 , A__ = 768 , A__ = 12 , A__ = 12 , A__ = None , A__ = "gelu_new" , A__ = 0.1 , A__ = 0.1 , A__ = 0.1 , A__ = 1e-5 , A__ = 0.0_2 , A__ = True , A__ = True , A__ = False , A__ = False , ):
        # Builds encode_prefix/decode_prefix linear maps (or Identity when no
        # hidden dim) plus a GPT-2 LM head configured from the kwargs.
        super().__init__()
        A__ : Union[str, Any] = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
                F""" `n_embd`: {n_embd} are not equal.""" )
        A__ : str = prefix_inner_dim
        A__ : Optional[Any] = prefix_hidden_dim
        A__ : Tuple = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        A__ : int = (
            nn.Linear(self.prefix_hidden_dim , A__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        A__ : Tuple = GPTaConfig(
            vocab_size=A__ , n_positions=A__ , n_embd=A__ , n_layer=A__ , n_head=A__ , n_inner=A__ , activation_function=A__ , resid_pdrop=A__ , embd_pdrop=A__ , attn_pdrop=A__ , layer_norm_epsilon=A__ , initializer_range=A__ , scale_attn_weights=A__ , use_cache=A__ , scale_attn_by_inverse_layer_idx=A__ , reorder_and_upcast_attn=A__ , )
        A__ : int = GPTaLMHeadModel(A__ )
    def __A ( self , A__ , A__ , A__ = None , A__ = None , ):
        # Forward pass: embed token ids, project the prefix, run GPT-2 over the
        # concatenation; with labels, prepends dummy tokens covering the prefix.
        A__ : List[str] = self.transformer.transformer.wte(A__ )
        A__ : int = self.encode_prefix(A__ )
        A__ : int = self.decode_prefix(A__ )
        A__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            A__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : List[str] = self.transformer(inputs_embeds=A__ , labels=A__ , attention_mask=A__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def __A ( self , A__ , A__ ):
        # zero "dummy" label tokens spanning the prefix positions
        return torch.zeros(A__ , self.prefix_length , dtype=torch.intaa , device=A__ )
    def __A ( self , A__ ):
        return self.encode_prefix(A__ )
    @torch.no_grad()
    def __A ( self , A__ , A__ , A__ ):
        # Per-sample generation: decode each prefix feature and beam-search a caption.
        A__ : List[Any] = torch.split(A__ , 1 , dim=0 )
        A__ : Optional[int] = []
        A__ : str = []
        for feature in features:
            A__ : Dict = self.decode_prefix(feature.to(A__ ) )  # back to the clip feature
            # Only support beam search for now
            A__ , A__ : Union[str, Any] = self.generate_beam(
                input_embeds=A__ , device=A__ , eos_token_id=A__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : int = torch.stack(A__ )
        A__ : List[Any] = torch.stack(A__ )
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def __A ( self , A__=None , A__=None , A__=None , A__ = 5 , A__ = 67 , A__ = 1.0 , A__ = None , ):
        # Beam search over GPT-2 logits: keeps `beam_size` candidates, tracking
        # per-beam cumulative log-scores, sequence lengths and stop flags.
        A__ : Any = eos_token_id
        A__ : Any = None
        A__ : Optional[int] = None
        A__ : Optional[Any] = torch.ones(A__ , device=A__ , dtype=torch.int )
        A__ : Any = torch.zeros(A__ , device=A__ , dtype=torch.bool )
        if input_embeds is not None:
            A__ : Dict = input_embeds
        else:
            A__ : str = self.transformer.transformer.wte(A__ )
        for i in range(A__ ):
            A__ : Dict = self.transformer(inputs_embeds=A__ )
            A__ : str = outputs.logits
            A__ : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Any = logits.softmax(-1 ).log()
            if scores is None:
                # first step: fan out into beam_size candidates
                A__ , A__ : Optional[int] = logits.topk(A__ , -1 )
                A__ : List[Any] = generated.expand(A__ , *generated.shape[1:] )
                A__ , A__ : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : Optional[Any] = next_tokens
                else:
                    A__ : List[Any] = tokens.expand(A__ , *tokens.shape[1:] )
                    A__ : int = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # subsequent steps: length-normalized scores, pick best beams
                A__ : Optional[int] = -float(np.inf )
                A__ : List[Any] = 0
                A__ : str = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Dict = scores_sum / seq_lengths[:, None]
                A__ , A__ : List[Any] = scores_sum_average.view(-1 ).topk(A__ , -1 )
                A__ : Tuple = next_tokens // scores_sum.shape[1]
                A__ : Optional[Any] = seq_lengths[next_tokens_source]
                A__ : List[str] = next_tokens % scores_sum.shape[1]
                A__ : Optional[int] = next_tokens.unsqueeze(1 )
                A__ : int = tokens[next_tokens_source]
                A__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
                A__ : str = generated[next_tokens_source]
                A__ : Optional[Any] = scores_sum_average * seq_lengths
                A__ : Union[str, Any] = is_stopped[next_tokens_source]
            A__ : str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
            A__ : List[str] = is_stopped + next_tokens.eq(A__ ).squeeze()
            if is_stopped.all():
                break
        A__ : Dict = scores / seq_lengths
        A__ : Dict = scores.argsort(descending=A__ )
        # tokens tensors are already padded to max_seq_length
        A__ : Union[str, Any] = [tokens[i] for i in order]
        A__ : Any = torch.stack(A__ , dim=0 )
        A__ : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 64 |
def UpperCamelCase(a: str, b: str) -> bool:
    """Return True if `a` can be turned into `b` by uppercasing some of its
    lowercase letters and deleting the remaining lowercase letters.

    dp[i][j] is True when the first i chars of `a` can produce the first j
    chars of `b`.  The mangled version declared duplicate `lowercase_`
    parameters (a SyntaxError) and collapsed the dp table writes into plain
    assignments, so the table was never filled.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # uppercase a[i] to match b[j]
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # or delete a[i] if it is lowercase
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest
    doctest.testmod()
| 64 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the BlenderbotSmall model family.  The mangled
# version bound this dict — and each optional-backend symbol list — to the
# throwaway name ``A_``, while the ``_LazyModule`` call below referenced the
# never-defined ``_import_structure`` (NameError at import time) and the
# resulting lazy module was never installed into ``sys.modules``.
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

# Each optional backend registers its symbols only when importable.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type-checkers get real imports; runtime gets the lazy module below.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
def UpperCamelCase(shape, scale=1.0, rng=None, name=None):
    """Return a shape[0] x shape[1] nested list of random floats in [0, scale).

    NOTE(review): the mangled signature declared duplicate `lowercase_`
    parameters (a SyntaxError); names restored from the body.  When `rng` is
    None the module-level shared RNG is used (bound to the mangled name `A_`
    above; originally `global_rng`).
    """
    if rng is None:
        rng = A_  # module-level shared random.Random() instance
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class _a (unittest.TestCase ):
    """Config holder for AST feature-extraction tests: builds constructor kwargs
    and synthetic speech inputs.

    NOTE(review): the mangled ``__init__`` declared duplicate ``A__``
    parameters (a SyntaxError) and bound every value to a local ``A__`` instead
    of ``self`` attributes; both methods were named ``__A`` (the second
    shadowed the first) although the sibling test class calls
    ``prepare_feat_extract_dict``.  Defaults restored in the original order.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # step between successive input lengths so the batch spans min..max
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create batch_size float speech inputs (optionally equal-length / numpy)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            # UpperCamelCase is the module-level floats helper (originally `floats_list`)
            speech_inputs = UpperCamelCase((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(UpperCamelCase((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
    """Tests for ASTFeatureExtractor: batched/unbatched equivalence, pad dtype
    handling and an integration check on a librispeech sample.

    NOTE(review): automated renaming broke this block — the mixin base class
    became the undefined name ``__magic_name__`` (presumably the
    sequence-feature-extraction test mixin imported above — confirm), every
    method is named ``__A`` (later defs shadow earlier ones), helper results
    are bound to ``A__`` but read back via original names (``feat_extract``,
    ``speech_inputs`` ...), and ``ASTFeatureExtractionTester`` /
    ``floats_list`` refer to definitions above that were renamed.  Code left
    verbatim; comments only.
    """
    # feature_extraction_class consumed by the mixin
    UpperCAmelCase__: int = ASTFeatureExtractor
    def __A ( self ):
        # setUp: build the config holder for the extractor under test
        A__ : Optional[Any] = ASTFeatureExtractionTester(self )
    def __A ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
        # Test not batched input
        A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test batched
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        A__ : List[str] = np.asarray(A__ )
        A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
    @require_torch
    def __A ( self ):
        # pad() must keep float64 for numpy output and use float32 for torch
        import torch
        A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
        A__ : Tuple = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def __A ( self , A__ ):
        # helper: load num_samples decoded audio arrays from the dummy dataset
        from datasets import load_dataset
        A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]
    @require_torch
    def __A ( self ):
        # integration: first 30 values of the extracted features must match
        # the reference tensor within 1e-4
        # fmt: off
        A__ : Optional[Any] = torch.tensor(
            [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
            -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
            -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
            -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
        # fmt: on
        A__ : Any = self._load_datasamples(1 )
        A__ : Tuple = ASTFeatureExtractor()
        A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 1 |
from math import pi, sqrt, tan
def UpperCamelCase (side_length: float ) -> float:
    """Surface area of a cube with edge ``side_length`` (6·a²).

    Raises ValueError for negative input.
    """
    # Fix: the parameter had been mangled to `lowercase_` while the body still
    # referenced `side_length`, raising NameError on every call.
    if side_length < 0:
        raise ValueError("""surface_area_cube() only accepts non-negative values""" )
    return 6 * side_length**2
def UpperCamelCase (length: float , breadth: float , height: float ) -> float:
    """Surface area of a cuboid: 2(lb + bh + lh).

    Raises ValueError if any dimension is negative.
    """
    # Fix: three parameters were all mangled to `lowercase_` (a SyntaxError) and
    # the body referenced the original names; restored from the body.
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("""surface_area_cuboid() only accepts non-negative values""" )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def UpperCamelCase (radius: float ) -> float:
    """Surface area of a sphere: 4·π·r².

    Raises ValueError for negative input.
    """
    # Fix: parameter mangled to `lowercase_` while the body used `radius`.
    if radius < 0:
        raise ValueError("""surface_area_sphere() only accepts non-negative values""" )
    return 4 * pi * radius**2
def UpperCamelCase (radius: float ) -> float:
    """Total surface area of a hemisphere (curved + flat disc): 3·π·r².

    Raises ValueError for negative input.
    """
    # Fix: parameter mangled to `lowercase_` while the body used `radius`.
    if radius < 0:
        raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" )
    return 3 * pi * radius**2
def UpperCamelCase (radius: float , height: float ) -> float:
    """Total surface area of a right cone: π·r·(r + slant), slant = √(h² + r²).

    Raises ValueError for negative input.
    """
    # Fix: both parameters were mangled to `lowercase_` (duplicate parameter
    # names); restored from the body's references.
    if radius < 0 or height < 0:
        raise ValueError("""surface_area_cone() only accepts non-negative values""" )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def UpperCamelCase (radius_a: float , radius_b: float , height: float ) -> float:
    """Total surface area of a conical frustum:
    π·(slant·(r₁ + r₂) + r₁² + r₂²), slant = √(h² + (r₁ - r₂)²).

    Raises ValueError for negative input.
    """
    # Fix: mangling collapsed `radius_a`/`radius_b` into a single name in both
    # the signature and the body; the second radius is restored from the formula.
    if radius_a < 0 or radius_b < 0 or height < 0:
        raise ValueError(
            """surface_area_conical_frustum() only accepts non-negative values""" )
    slant_height = (height**2 + (radius_a - radius_b) ** 2) ** 0.5
    return pi * ((slant_height * (radius_a + radius_b)) + radius_a**2 + radius_b**2)
def UpperCamelCase (radius: float , height: float ) -> float:
    """Total surface area of a cylinder: 2·π·r·(h + r).

    Raises ValueError for negative input.
    """
    # Fix: duplicate mangled parameter names; restored from the body.
    if radius < 0 or height < 0:
        raise ValueError("""surface_area_cylinder() only accepts non-negative values""" )
    return 2 * pi * radius * (height + radius)
def UpperCamelCase (torus_radius: float , tube_radius: float ) -> float:
    """Surface area of a (ring) torus: 4·π²·R·r.

    Raises ValueError for negative input or for spindle/self-intersecting tori
    (R < r).
    """
    # Fixes: duplicate mangled parameter names, and `pow(lowercase_, 2)` where
    # the squared constant must be `pi` (4π²Rr is the standard torus formula).
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("""surface_area_torus() only accepts non-negative values""" )
    if torus_radius < tube_radius:
        raise ValueError(
            """surface_area_torus() does not support spindle or self intersecting tori""" )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def UpperCamelCase (length: float , width: float ) -> float:
    """Area of a rectangle: length × width.

    Raises ValueError for negative input.
    """
    # Fix: duplicate mangled parameter names; restored from the body.
    if length < 0 or width < 0:
        raise ValueError("""area_rectangle() only accepts non-negative values""" )
    return length * width
def UpperCamelCase (side_length: float ) -> float:
    """Area of a square: a².

    Raises ValueError for negative input.
    """
    # Fix: parameter mangled to `lowercase_` while the body used `side_length`.
    if side_length < 0:
        raise ValueError("""area_square() only accepts non-negative values""" )
    return side_length**2
def UpperCamelCase (base: float , height: float ) -> float:
    """Area of a triangle from base and height: b·h / 2.

    Raises ValueError for negative input.
    """
    # Fix: duplicate mangled parameter names; restored from the body.
    if base < 0 or height < 0:
        raise ValueError("""area_triangle() only accepts non-negative values""" )
    return (base * height) / 2
def UpperCamelCase (side_a: float , side_b: float , side_c: float ) -> float:
    """Area of a triangle from its three side lengths (Heron's formula).

    Raises ValueError for negative input or if the sides violate the triangle
    inequality.
    """
    # Fix: mangling collapsed all three side names into one in both signature
    # and body; the three distinct sides are restored per Heron's formula.
    if side_a < 0 or side_b < 0 or side_c < 0:
        raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" )
    elif side_a + side_b < side_c or side_a + side_c < side_b or side_b + side_c < side_a:
        raise ValueError("""Given three sides do not form a triangle""" )
    semi_perimeter = (side_a + side_b + side_c) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_a)
        * (semi_perimeter - side_b)
        * (semi_perimeter - side_c) )
    return area
def UpperCamelCase (base: float , height: float ) -> float:
    """Area of a parallelogram: base × height.

    Raises ValueError for negative input.
    """
    # Fix: duplicate mangled parameter names; restored from the body.
    if base < 0 or height < 0:
        raise ValueError("""area_parallelogram() only accepts non-negative values""" )
    return base * height
def UpperCamelCase (base_a: float , base_b: float , height: float ) -> float:
    """Area of a trapezium: ½·(b₁ + b₂)·h.

    Raises ValueError for negative input.
    """
    # Fix: mangling collapsed the two base names; restored from the formula.
    if base_a < 0 or base_b < 0 or height < 0:
        raise ValueError("""area_trapezium() only accepts non-negative values""" )
    return 1 / 2 * (base_a + base_b) * height
def UpperCamelCase (radius: float ) -> float:
    """Area of a circle: π·r².

    Raises ValueError for negative input.
    """
    # Fix: parameter mangled to `lowercase_` while the body used `radius`.
    if radius < 0:
        raise ValueError("""area_circle() only accepts non-negative values""" )
    return pi * radius**2
def UpperCamelCase (radius_x: float , radius_y: float ) -> float:
    """Area of an ellipse: π·a·b.

    Raises ValueError for negative input.
    """
    # Fix: duplicate mangled parameter names; restored from the body.
    if radius_x < 0 or radius_y < 0:
        raise ValueError("""area_ellipse() only accepts non-negative values""" )
    return pi * radius_x * radius_y
def UpperCamelCase (diagonal_a: float , diagonal_b: float ) -> float:
    """Area of a rhombus from its diagonals: ½·d₁·d₂.

    Raises ValueError for negative input.
    """
    # Fix: mangling collapsed the two diagonal names; restored from the formula.
    if diagonal_a < 0 or diagonal_b < 0:
        raise ValueError("""area_rhombus() only accepts non-negative values""" )
    return 1 / 2 * diagonal_a * diagonal_b
def UpperCamelCase (sides: int , length: float ) -> float:
    """Area of a regular polygon: (n·a²) / (4·tan(π/n)).

    Raises ValueError if ``sides`` is not an int >= 3 or ``length`` is negative.
    """
    # Fixes: duplicate mangled parameter names, `isinstance(lowercase_, lowercase_)`
    # (must check `int`), and a duplicated unreachable `return` statement removed.
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            """area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""" )
    elif length < 0:
        raise ValueError(
            """area_reg_polygon() only accepts non-negative values as \
length of a side""" )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True) # verbose so we can see methods missing tests
    # NOTE(review): the names printed below (area_rectangle, surface_area_cube, ...)
    # do not match the mangled `UpperCamelCase` definitions above — this demo will
    # raise NameError until the original function names are restored; verify.
    print('[DEMO] Areas of various geometric shapes: \n')
    print(f'''Rectangle: {area_rectangle(10, 20) = }''')
    print(f'''Square: {area_square(10) = }''')
    print(f'''Triangle: {area_triangle(10, 10) = }''')
    print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
    print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
    print(f'''Rhombus: {area_rhombus(10, 20) = }''')
    print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
    print(f'''Circle: {area_circle(20) = }''')
    print(f'''Ellipse: {area_ellipse(10, 20) = }''')
    print('\nSurface Areas of various geometric shapes: \n')
    print(f'''Cube: {surface_area_cube(20) = }''')
    print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
    print(f'''Sphere: {surface_area_sphere(20) = }''')
    print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
    print(f'''Cone: {surface_area_cone(10, 20) = }''')
    print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
    print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
    print(f'''Torus: {surface_area_torus(20, 10) = }''')
    print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
    print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Reqular Pentagon: {area_reg_polygon(5, 10) = }''')
| 64 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a (ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    """GPT-2 based text decoder that consumes a (CLIP-style) prefix embedding,
    optionally projects it through an encode/decode bottleneck, and generates
    captions via beam search.

    Fixes applied: base classes were mangled to `__magic_name__` (the three
    mixins imported at the top of the file are otherwise unused); `__init__`
    had duplicate mangled parameter names (a SyntaxError) — names restored from
    the keyword arguments passed to `GPTaConfig`; method names collapsed to
    `__A` restored from the `self.<name>` call sites; the ValueError f-string
    interpolated `prefix_hidden_dim` where `prefix_inner_dim` was labelled;
    `torch.intaa` restored to `torch.int64` (mangled dtype name).
    """

    # NOTE(review): attribute name was mangled; presumably a keys-to-ignore
    # regex list for checkpoint loading — confirm the original name.
    UpperCAmelCase__: str = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']

    @register_to_config
    def __init__( self , prefix_length , prefix_inner_dim , prefix_hidden_dim = None , vocab_size = 5_0257 , n_positions = 1024 , n_embd = 768 , n_layer = 12 , n_head = 12 , n_inner = None , activation_function = "gelu_new" , resid_pdrop = 0.1 , embd_pdrop = 0.1 , attn_pdrop = 0.1 , layer_norm_epsilon = 1e-5 , initializer_range = 0.0_2 , scale_attn_weights = True , use_cache = True , scale_attn_by_inverse_layer_idx = False , reorder_and_upcast_attn = False , ):
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"""
                F""" `n_embd`: {n_embd} are not equal.""" )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        # Optional bottleneck: project the prefix into/out of `prefix_hidden_dim`.
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPTaConfig(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPTaLMHeadModel(gpt_config )

    def forward( self , input_ids , prefix_embeds , attention_mask = None , labels = None , ):
        """Run the LM on [decoded prefix ; token embeddings]; when `labels` is
        given, prepend dummy (zero) label positions for the prefix tokens."""
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token( self , batch_size , device ):
        """Zero label placeholders covering the prefix positions."""
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )

    def encode( self , prefix ):
        """Project a prefix through the (optional) bottleneck encoder."""
        return self.encode_prefix(prefix )

    @torch.no_grad()
    def generate_captions( self , features , eos_token_id , device ):
        """Generate one beam-searched caption per feature row; returns stacked
        token tensors and their sequence lengths."""
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam( self , input_embeds=None , device=None , input_ids=None , beam_size = 5 , entry_length = 67 , temperature = 1.0 , eos_token_id = None , ):
        """Beam search over the GPT-2 LM, starting from `input_embeds` (or the
        embeddings of `input_ids`); returns (tokens, seq_lengths) sorted by
        descending per-token score."""
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()
            if scores is None:
                # First step: seed the beams with the top-k tokens.
                scores, next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens, scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Stopped beams may only repeat token 0 at zero cost.
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(eos_token_id ).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 64 | 1 |
def UpperCamelCase (files: list ) -> float:
    """Optimal merge pattern cost: repeatedly merge the two smallest files,
    summing the cost of every merge. Mutates the input list in place.

    Fix: the parameter was mangled to `lowercase_` while the body also used
    `files`, so calls mixed the two names inconsistently.
    """
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
A_ : Tuple = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader.

    Fixes: the class name was mangled to `_a` while the builder below assigns
    ``BUILDER_CONFIG_CLASS = JsonConfig``; all field names were collapsed to
    ``UpperCAmelCase__`` — restored from the ``self.config.<field>`` reads in
    the builder (features, encoding, encoding_errors, field, use_threads,
    block_size, chunksize, newlines_in_values).
    """

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class _a (datasets.ArrowBasedBuilder ):
    '''simple docstring'''

    # Builder config class; references `JsonConfig`, which the mangled dataclass
    # above was presumably named — TODO confirm. Attribute name itself is mangled
    # (conventionally `BUILDER_CONFIG_CLASS` in datasets builders — verify).
    UpperCAmelCase__: List[str] = JsonConfig

    def __A ( self ):
        """Validate deprecated/removed config options and return the DatasetInfo
        (appears to be datasets' `_info` hook — method name mangled)."""
        if self.config.block_size is not None:
            logger.warning("""The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead""" )
            A__ : Union[str, Any] = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                """The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.""" )
        if self.config.newlines_in_values is not None:
            raise ValueError("""The JSON loader parameter `newlines_in_values` is no longer supported""" )
        return datasets.DatasetInfo(features=self.config.features )

    def __A ( self , A__ ):
        """Download/extract `data_files` and build one SplitGenerator per split
        (appears to be `_split_generators`; `A__` is the download manager)."""
        if not self.config.data_files:
            raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        A__ : int = dl_manager.download_and_extract(self.config.data_files )
        # A plain str/list/tuple means a single unnamed split -> TRAIN.
        if isinstance(A__ , (str, list, tuple) ):
            A__ : Optional[Any] = data_files
            if isinstance(A__ , A__ ):
                A__ : List[str] = [files]
            A__ : int = [dl_manager.iter_files(A__ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        A__ : List[str] = []
        for split_name, files in data_files.items():
            if isinstance(A__ , A__ ):
                A__ : Optional[int] = [files]
            A__ : Optional[int] = [dl_manager.iter_files(A__ ) for file in files]
            splits.append(datasets.SplitGenerator(name=A__ , gen_kwargs={"""files""": files} ) )
        return splits

    def __A ( self , A__ ):
        """Cast an Arrow table to the configured features, adding any missing
        columns as all-null (appears to be `_cast_table`)."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                A__ : Optional[Any] = self.config.features.arrow_schema.field(A__ ).type
                A__ : str = pa_table.append_column(A__ , pa.array([None] * len(A__ ) , type=A__ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            A__ : Optional[int] = table_cast(A__ , self.config.features.arrow_schema )
        return pa_table

    def __A ( self , A__ ):
        """Yield (key, Arrow table) pairs from each JSON file (appears to be
        `_generate_tables`). Supports (a) one JSON object with a list under
        `config.field`, (b) JSON-lines parsed in chunks via pyarrow with an
        adaptive block size, and (c) a whole-file list-of-dicts fallback."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(A__ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    A__ : Optional[Any] = json.load(A__ )
                # We keep only the field we are interested in
                A__ : Optional[int] = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(A__ , (list, tuple) ):
                    A__ : Union[str, Any] = set().union(*[row.keys() for row in dataset] )
                    A__ : Any = {col: [row.get(A__ ) for row in dataset] for col in keys}
                else:
                    A__ : Any = dataset
                A__ : Any = pa.Table.from_pydict(A__ )
                yield file_idx, self._cast_table(A__ )
            # If the file has one json object per line
            else:
                with open(A__ , """rb""" ) as f:
                    A__ : List[str] = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    A__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
                    A__ : Any = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else """strict"""
                    )
                    while True:
                        A__ : Dict = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(A__ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            A__ : List[Any] = batch.decode(self.config.encoding , errors=A__ ).encode("""utf-8""" )
                        try:
                            while True:
                                try:
                                    A__ : str = paj.read_json(
                                        io.BytesIO(A__ ) , read_options=paj.ReadOptions(block_size=A__ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    # "straddling" errors mean a record crossed the block
                                    # boundary: retry with a doubled block size.
                                    if (
                                        isinstance(A__ , pa.ArrowInvalid )
                                        and "straddling" not in str(A__ )
                                        or block_size > len(A__ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"""Batch of {len(A__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Fallback: the chunk is not JSON-lines; try parsing the
                            # whole file as a single JSON document.
                            try:
                                with open(
                                    A__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    A__ : Optional[Any] = json.load(A__ )
                            except json.JSONDecodeError:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(A__ , A__ ): # list is the only sequence type supported in JSON
                                try:
                                    A__ : str = set().union(*[row.keys() for row in dataset] )
                                    A__ : List[str] = {col: [row.get(A__ ) for row in dataset] for col in keys}
                                    A__ : int = pa.Table.from_pydict(A__ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                    raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
                                yield file_idx, self._cast_table(A__ )
                                break
                            else:
                                logger.error(F"""Failed to read file '{file}' with error {type(A__ )}: {e}""" )
                                raise ValueError(
                                    F"""Not able to read records in the JSON file at {file}. """
                                    F"""You should probably indicate the field of the JSON file containing your records. """
                                    F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
                                    F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(A__ )
                        batch_idx += 1
| 64 | 1 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int] ,
    diagonal_right_collisions: list[int] ,
    diagonal_left_collisions: list[int] ,
    boards: list[list[str]] ,
    n: int ,
) -> None:
    """Recursive N-queens search: try every column of the current row and
    recurse with updated column / diagonal collision sets, appending every
    complete board (rendered as strings) to ``boards``.

    Fix: the function name and parameters were mangled while the recursive
    call (and the caller below) still use ``depth_first_search``; names
    restored from the body's references.
    """
    row = len(possible_board )
    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
        return
    # We iterate each column in the row to find all possible results in each row
    for col in range(n ):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution(n: int ) -> None:
    """Solve N-queens for board size ``n`` and print every board plus a count.

    Fix: the function name was mangled while the ``__main__`` guard calls
    ``n_queens_solution``; placeholder arguments restored from context.
    """
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print("""""" )
    print(len(boards ) , """solutions were found.""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
| 700 |
# Lint-style script: validates repository file names collected by
# good_file_paths() — flags uppercase letters, spaces, hyphens, and files
# placed at the repository root, then exits non-zero if any were found.
import os
try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths # type: ignore
A_ : Dict = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
# Files whose name contains an uppercase character.
A_ : Optional[Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f'''{len(upper_files)} files contain uppercase characters:''')
    print('\n'.join(upper_files) + '\n')
# Files whose path contains a space.
A_ : Tuple = [file for file in filepaths if ' ' in file]
if space_files:
    print(f'''{len(space_files)} files contain space characters:''')
    print('\n'.join(space_files) + '\n')
# Files whose path contains a hyphen (snake_case is expected).
A_ : Any = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(f'''{len(hyphen_files)} files contain hyphen characters:''')
    print('\n'.join(hyphen_files) + '\n')
# Files that sit directly at the repository root (no directory separator).
A_ : List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f'''{len(nodir_files)} files are not in a directory:''')
    print('\n'.join(nodir_files) + '\n')
# Non-zero exit code (= number of offending files) fails CI.
A_ : Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys
    sys.exit(bad_files)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Union[str, Any] = logging.get_logger(__name__)
A_ : Tuple = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class _a (PretrainedConfig ):
    """Configuration for the data2vec-text model.

    Fixes: the base class was mangled to the undefined name `_snake_case` —
    restored to the `PretrainedConfig` imported at the top of the file (the
    ``super().__init__(pad_token_id=..., bos_token_id=..., eos_token_id=...)``
    call is that class's contract); ``__init__`` had every parameter mangled to
    ``A__`` (duplicate parameter names, a SyntaxError) — names restored from
    the attribute assignments in the body and the standard token-id defaults.
    """

    # NOTE(review): attribute name was mangled; PretrainedConfig subclasses
    # declare ``model_type`` — confirm against the original file.
    model_type = '''data2vec-text'''

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _a (OnnxConfig ):
    """ONNX export configuration for data2vec-text.

    Fixes: the base class was mangled to the undefined `_snake_case` — restored
    to the `OnnxConfig` imported at the top of the file; the mangled `__A`
    property is restored to `inputs`, the hook OnnxConfig subclasses override.
    """

    @property
    def inputs( self ):
        # Multiple-choice inputs carry an extra `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 701 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def UpperCamelCase (*args: Optional[int] , take_from: Optional[Union[Dict, Any]] = None , standard_warn: bool = True , stacklevel: int = 2 ) -> Dict:
    """Emit deprecation warnings for (attribute, removal_version, message)
    tuples, popping deprecated kwargs from ``take_from`` (a dict) or reading
    deprecated attributes from it (an object). Raises ValueError when the
    library version has already passed ``removal_version``, and TypeError for
    leftover unexpected kwargs. Returns the collected value(s), if any.

    Fixes: keyword parameters were all mangled to ``lowercase_`` (duplicate
    parameter names); the ``warnings.warn`` category placeholder is restored
    to ``FutureWarning`` (matching the warning wording); the TypeError message
    had a literal ``(unknown)`` where the captured ``filename`` local —
    assigned but otherwise unused — was interpolated.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        # The deprecation entry itself must be deleted once the version passes.
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                f""" version {__version__} is >= {version_name}""" )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    # Any kwargs left in `take_from` were not expected: report the caller's frame.
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )

    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
| 64 | 0 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def UpperCamelCase (length: int = 8 ) -> str:
    """Generate a cryptographically random password of ``length`` characters
    drawn from letters, digits and punctuation.

    Fix: the parameter and the `secrets.choice` argument were mangled
    placeholders (`lowercase_` / `_SCREAMING_SNAKE_CASE`); restored from the
    assignment on the first line.
    """
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def UpperCamelCase (chars_incl: str , i: int ) -> str:
    """Generate a shuffled password of total length ``i`` that contains all of
    ``chars_incl``, filling the rest with letters, digits and punctuation.

    Fix: mangled placeholder arguments restored; the `random(...)` character
    classes follow the commented-out recipe below (letters, digits,
    punctuation) — TODO confirm against the original file.
    """
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    chars_list = list(chars )
    shuffle(chars_list )
    return "".join(chars_list )
# random is a generalised function for letters, characters and numbers
# random is a generalised function for letters, characters and numbers
def random(chars: str , number_of_chars: int ) -> str:
    """Return ``number_of_chars`` characters chosen uniformly (via `secrets`)
    from ``chars``.

    Fix: the def name was mangled while the caller above and the comment refer
    to ``random``; duplicate placeholder parameters renamed.
    """
    return "".join(secrets.choice(chars ) for _ in range(number_of_chars ) )
def UpperCamelCase (chars: str , n: int ):
    """Unimplemented stub (random_number per the comments above).

    Fix: both parameters were mangled to the same name (a SyntaxError);
    parameter names are a guess — TODO confirm against the original file.
    """
    pass # Put your code here...
def UpperCamelCase (chars: str , n: int ):
    """Unimplemented stub (random_letters per the comments above).

    Fix: both parameters were mangled to the same name (a SyntaxError);
    parameter names are a guess — TODO confirm against the original file.
    """
    pass # Put your code here...
def UpperCamelCase (chars: str , n: int ):
    """Unimplemented stub (random_characters per the comments above).

    Fix: both parameters were mangled to the same name (a SyntaxError);
    parameter names are a guess — TODO confirm against the original file.
    """
    pass # Put your code here...
def UpperCamelCase (password: str , min_length: int = 8 ) -> bool:
    """Return True when ``password`` is at least ``min_length`` characters long
    and contains an uppercase letter, a lowercase letter, a digit and a
    punctuation character.

    Fix: both parameters were mangled placeholders while the body referenced
    ``password`` / ``min_length``.
    """
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowerase
    # numbers, and special characters
def UpperCamelCase () -> None:
    """Interactive demo: prompt for a max length and mandatory characters, then
    print a generated and an alternative password.

    Fixes: the return annotation said ``-> str`` but nothing is returned;
    mangled ``_SCREAMING_SNAKE_CASE`` call arguments replaced by the locals
    read above. NOTE(review): ``password_generator`` /
    ``alternative_password_generator`` / ``main`` must match the (currently
    mangled) def names above — confirm once those are restored.
    """
    max_length = int(input("""Please indicate the max length of your password: """ ).strip() )
    chars_incl = input(
        """Please indicate the characters that must be in your password: """ ).strip()
    print("""Password generated:""" , password_generator(max_length ) )
    print(
        """Alternative Password generated:""" , alternative_password_generator(chars_incl , max_length ) , )
    print("""[If you are thinking of using this passsword, You better save it.]""" )


if __name__ == "__main__":
    main()
| 702 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCamelCase (flax_key_tuple , flax_tensor ):
    """Rename one flax parameter key tuple (and transpose its tensor) to the
    PyTorch convention: 3-D `kernel` -> permuted expert weight, 2-D `kernel` ->
    transposed linear weight, `scale`/`embedding` -> `weight` unchanged.

    Fixes: both parameters were mangled to ``lowercase_`` (duplicate parameter
    names); the second `kernel` condition joined the wrong mangled name —
    restored to ``".".join(flax_key_tuple)``.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("""weight""",)
    return flax_key_tuple, flax_tensor
def UpperCamelCase (layer , checkpoint_info , switch_checkpoint_path ):
    """Split a flattened checkpoint key into (real layer name, sub-key tuple,
    content), resolving `kvstore/path` entries to absolute paths under
    ``switch_checkpoint_path`` and `kvstore/driver` entries to ``"file"``.

    Fix: the three parameters were mangled to ``lowercase_`` (duplicate
    parameter names); restored from the body's references.
    """
    if "metadata" in layer:
        split_layer = layer.split("""metadata""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("""kvstore""" )
        curr_real_layer_name = """""".join(split_layer[0] )[:-1]
        split_layer = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
    else:
        split_layer = layer.split("""/""" )
        curr_real_layer_name = """/""".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = """file"""
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def UpperCamelCase (current_block , save_path ):
    """Apply `rename_keys` to a state-dict shard and save it with torch.save.

    Fix: duplicate mangled parameter names restored from the body.
    NOTE(review): the mangled loop assigned ``A__ = v`` with the target lost —
    reconstructed here as an identity copy ``new_current_block[k] = v``; the
    original may have transformed the key while copying — confirm against the
    upstream conversion script.
    """
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def UpperCamelCase (lowercase_: Dict , lowercase_: Optional[Any] , lowercase_: Optional[Any] , lowercase_: Optional[int] , lowercase_: str = WEIGHTS_NAME ) -> Tuple:
A__ : Optional[int] = convert_file_size_to_int(lowercase_ )
A__ : List[Any] = []
A__ : int = {}
A__ : List[str] = 0
A__ : Any = 0
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
A__ : Optional[Any] = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
A__ : Dict = flatten_dict(lowercase_ , sep="""/""" )
A__ : Any = {}
for layer in checkpoint_info.keys():
A__ , A__ , A__ : Union[str, Any] = get_key_and_tensorstore_dict(
lowercase_ , lowercase_ , lowercase_ )
if curr_real_layer_name in all_layers:
A__ : Optional[int] = content
else:
A__ : List[Any] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
A__ : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
A__ : List[Any] = torch.tensor(lowercase_ )
A__ : List[Any] = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
A__ , A__ : Any = rename_base_flax_keys(tuple(key.split("""/""" ) ) , lowercase_ )
A__ : Any = """/""".join(lowercase_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
A__ : List[Any] = os.path.join(
lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
A__ : Any = {}
A__ : str = 0
A__ : List[str] = raw_weights.to(getattr(lowercase_ , lowercase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
A__ : Union[str, Any] = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{len(lowercase_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowercase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
A__ : str = {}
A__ : Any = {}
for idx, shard in enumerate(lowercase_ ):
A__ : Any = weights_name.replace(
""".bin""" , f"""-{idx+1:05d}-of-{len(lowercase_ ):05d}.bin""" ) # len(sharded_state_dicts):05d}
A__ : Dict = os.path.join(lowercase_ , weights_name.replace(""".bin""" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
A__ : str = shard
for key in shard:
A__ : Any = shard_file
# Add the metadata
A__ : Tuple = {"""total_size""": total_size}
A__ : Union[str, Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(lowercase_ , lowercase_ ) , """w""" , encoding="""utf-8""" ) as f:
A__ : Dict = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + """\n"""
f.write(lowercase_ )
return metadata, index
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
A_ : Dict = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def UpperCamelCase () -> int:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
A__ : str = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
A__ : str = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
A__ : Tuple = TaTokenizer.from_pretrained("""t5-small""" )
A__ : Dict = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
A__ : Union[str, Any] = tokenizer(lowercase_ , return_tensors="""pt""" ).input_ids
A__ : Tuple = model.generate(lowercase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 64 | 0 |
def UpperCamelCase (lowercase_: bytes ) -> str:
    """Encode *lowercase_* into an uppercase Base16 (hex) string.

    Each input byte becomes two uppercase hex digits (RFC 3548 section 6),
    so the empty input encodes to the empty string.
    """
    # Fix: the original called hex() on the whole bytes object, which raises
    # TypeError; each individual byte must be hex-encoded instead.
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(lowercase_ )] )
def UpperCamelCase (lowercase_: str ) -> bytes:
    """Decode an uppercase Base16 (hex) string back into raw bytes.

    Raises ValueError when the input has an odd number of digits or contains
    characters outside the uppercase RFC 3548 Base16 alphabet.
    """
    if (len(lowercase_ ) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:\nData does not have an even number of hex digits.""" )
    # The standard Base16 alphabet is uppercase per RFC 3548 section 6.
    if not set(lowercase_ ) <= set("""0123456789ABCDEF""" ):
        raise ValueError(
            """Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.""" )
    # Consume the string two hex digits (= one byte) at a time.
    decoded = bytearray()
    for offset in range(0 , len(lowercase_ ) , 2 ):
        decoded.append(int(lowercase_[offset : offset + 2] , 16 ) )
    return bytes(decoded )
if __name__ == "__main__":
    import doctest

    # Run this module's doctests when executed directly as a script.
    doctest.testmod()
| 703 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy import structure for the BARTpho tokenizer. The tokenizer is only
# registered when sentencepiece is installed; otherwise it is silently omitted.
# Fix: obfuscation had assigned this dict (and the tokenizer list) to `A_`
# while the code below references `_import_structure`, and the final
# `sys.modules[__name__]` assignment was dropped — all NameErrors at import.
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: expose no tokenizer rather than failing import.
    pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports only happen
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def UpperCamelCase (lowercase_: Tuple ) -> Optional[Any]:
    """Return the first MLP linear projection of the causal LM *lowercase_*.

    GPT-2 names that module ``c_fc``; the other supported (Bloom-style)
    checkpoints use the ``dense_ah_to_h`` attribute spelling seen here.
    """
    first_mlp = lowercase_.transformer.h[0].mlp
    if lowercase_.config.model_type == "gpt2":
        return first_mlp.c_fc
    return first_mlp.dense_ah_to_h
if is_torch_available():
    # torch/nn are only importable when torch is installed; the adapter class
    # below subclasses nn.Module, so both are gated on availability.
    import torch
    import torch.nn as nn
class _a (nn.Module ):
    """LoRA-style adapter: wraps a frozen linear *module* and adds a trainable
    low-rank (in -> rank -> out) correction to its output.

    Fix: the obfuscated source declared ``__init__`` with two parameters of
    the same name (a SyntaxError), referenced the undefined name
    ``UpperCamelCase_``, and named the forward pass ``__A`` so nn.Module's
    ``__call__`` could never dispatch to it. Restored to module/rank/forward.
    """

    def __init__( self , module , rank ):
        super().__init__()
        self.module = module
        # Two bias-free projections: down to `rank`, back up to out_features.
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features , rank , bias=False ) , nn.Linear(rank , module.out_features , bias=False ) , )
        # Small init on the down-projection keeps the adapter near-zero at start.
        init_std = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
        nn.init.normal_(self.adapter[0].weight , std=init_std )
        # Zero up-projection => the adapter contributes nothing initially.
        nn.init.zeros_(self.adapter[1].weight )
        self.adapter.to(module.weight.device )

    def forward( self , input , *args , **kwargs ):
        # Frozen base output plus the low-rank correction.
        return self.module(input , *args , **kwargs ) + self.adapter(input )


# Restore the name the training test below refers to (lost in obfuscation).
LoRALayer = _a
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _a (unittest.TestCase ):
    '''Shared fixture for the bitsandbytes 4-bit tests: model id, expected
    fp16/4-bit memory-footprint ratio, prompt and accepted generations.
    NOTE(review): obfuscation renamed every attribute to `UpperCAmelCase__`,
    yet this class and its subclasses still read the original names
    (EXPECTED_OUTPUTS, model_name, tokenizer, ...) — the original identifiers
    were lost and must be restored for this suite to run.'''
    UpperCAmelCase__: Any = '''bigscience/bloom-1b7'''
    # Constant values
    # Expected fp16 / 4-bit memory-footprint ratio for bloom-1b7.
    UpperCAmelCase__: float = 2.1_09_65_95_52_69_25_74
    UpperCAmelCase__: str = '''Hello my name is'''
    UpperCAmelCase__: str = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
    # Generation length used by every test below.
    UpperCAmelCase__: Optional[int] = 10
    def __A ( self ):
        # Fresh tokenizer for the shared model id, loaded per test.
        A__ : List[Any] = AutoTokenizer.from_pretrained(self.model_name )
class _a (__magic_name__ ):
    '''Core 4-bit quantization tests: loading, memory footprint, generation,
    config round-trips and the operations that must raise on quantized models.
    NOTE(review): names such as `UpperCamelCase_`, `floataa`, `uinta` and
    `load_in_abit` are obfuscation residue (presumably True / float16 / uint8
    / load_in_4bit) — confirm against the original test file.'''
    def __A ( self ):
        super().setUp()
        # Models and tokenizer
        # Reference fp16 model and the 4-bit quantized model under test.
        A__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map="""auto""" )
        A__ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map="""auto""" )
    def __A ( self ):
        # Drop both models and reclaim GPU memory between tests.
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()
    def __A ( self ):
        # The quantized model must expose a serializable quantization_config.
        A__ : Any = self.model_abit.config
        self.assertTrue(hasattr(UpperCamelCase_ , """quantization_config""" ) )
        A__ : int = config.to_dict()
        A__ : Union[str, Any] = config.to_diff_dict()
        A__ : Optional[Any] = config.to_json_string()
    def __A ( self ):
        from bitsandbytes.nn import Paramsabit
        # Footprint ratio fp16 / 4-bit must match the class constant, and the
        # linear weights must be bnb 4-bit parameters.
        A__ : Any = self.model_fpaa.get_memory_footprint()
        A__ : str = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        A__ : Optional[Any] = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )
    def __A ( self ):
        from transformers import TaPreTrainedModel
        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        # All quantized Linear weights (except kept-in-fp32 modules and the
        # lm_head) must be stored packed.
        for name, module in self.model_abit.named_modules():
            if isinstance(UpperCamelCase_ , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )
    def __A ( self ):
        # Generation from the 4-bit model must be one of the accepted outputs.
        A__ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors="""pt""" )
        A__ : List[Any] = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase_ ) , self.EXPECTED_OUTPUTS )
    def __A ( self ):
        # Loading through an explicit BitsAndBytesConfig must behave the same.
        A__ : Tuple = BitsAndBytesConfig()
        A__ : Optional[int] = True
        A__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=UpperCamelCase_ , device_map="""auto""" )
        A__ : str = self.tokenizer(self.input_text , return_tensors="""pt""" )
        A__ : Tuple = model_abit_from_config.generate(
            input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase_ ) , self.EXPECTED_OUTPUTS )
    def __A ( self ):
        # Serializing a 4-bit model is unsupported and must raise.
        with self.assertRaises(UpperCamelCase_ ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(UpperCamelCase_ )
    def __A ( self ):
        # Conflicting quantization arguments must raise.
        A__ : int = BitsAndBytesConfig()
        with self.assertRaises(UpperCamelCase_ ):
            A__ : int = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=UpperCamelCase_ , load_in_abit=UpperCamelCase_ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , )
    def __A ( self ):
        # Device/dtype casts of a quantized model must raise; the fp16 model
        # must still cast freely afterwards.
        with self.assertRaises(UpperCamelCase_ ):
            # Tries with `str`
            self.model_abit.to("""cpu""" )
        with self.assertRaises(UpperCamelCase_ ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )
        with self.assertRaises(UpperCamelCase_ ):
            # Tries with a `device`
            self.model_abit.to(torch.device("""cuda:0""" ) )
        with self.assertRaises(UpperCamelCase_ ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(UpperCamelCase_ ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        A__ : int = self.tokenizer(self.input_text , return_tensors="""pt""" )
        A__ : Union[str, Any] = self.model_fpaa.to(torch.floataa )
        A__ : Tuple = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        A__ : Tuple = self.model_fpaa.to("""cpu""" )
        # Check this does not throw an error
        A__ : Dict = self.model_fpaa.half()
        # Check this does not throw an error
        A__ : Tuple = self.model_fpaa.float()
    def __A ( self ):
        # Modules listed in the model's keep-in-fp32 set stay float32
        # even after a 4-bit load.
        A__ : Tuple = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase_ , device_map="""auto""" )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _a (unittest.TestCase ):
    '''T5-specific 4-bit tests: generation must work both with and without
    the keep-in-fp32 module list, for dense-relu-dense (t5-small) and
    dense-act (flan-t5-small) variants.
    NOTE(review): `UpperCamelCase_` / `load_in_abit` / `Linearabit` are
    obfuscation residue; the locals assigned to `A__` are read back under
    their lost original names (tokenizer, model, encoded_input, ...).'''
    @classmethod
    def __A ( cls ):
        A__ : Optional[int] = 't5-small'
        A__ : Tuple = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
        A__ : Optional[int] = AutoTokenizer.from_pretrained(cls.model_name )
        A__ : Union[str, Any] = 'Translate in German: Hello, my dog is cute'
    def __A ( self ):
        # Reclaim GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()
    def __A ( self ):
        from transformers import TaForConditionalGeneration
        # Temporarily clear the keep-in-fp32 list, then restore it at the end.
        A__ : Union[str, Any] = TaForConditionalGeneration._keep_in_fpaa_modules
        A__ : Optional[Any] = None
        # test with `t5-small`
        A__ : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map="""auto""" )
        A__ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        A__ : Optional[Any] = model.generate(**UpperCamelCase_ )
        # test with `flan-t5-small`
        A__ : Dict = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase_ , device_map="""auto""" )
        A__ : str = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        A__ : int = model.generate(**UpperCamelCase_ )
        A__ : Optional[Any] = modules
    def __A ( self ):
        import bitsandbytes as bnb
        from transformers import TaForConditionalGeneration
        # test with `t5-small`
        A__ : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map="""auto""" )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        A__ : Optional[Any] = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        A__ : List[Any] = model.generate(**UpperCamelCase_ )
        # test with `flan-t5-small`
        A__ : List[str] = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=UpperCamelCase_ , device_map="""auto""" )
        A__ : Any = self.tokenizer(self.input_text , return_tensors="""pt""" ).to(0 )
        A__ : List[Any] = model.generate(**UpperCamelCase_ )
class _a (__magic_name__ ):
    '''4-bit loading across model heads (base, sequence classification,
    causal LM, seq2seq): the backbone is quantized, output heads stay
    nn.Parameter.
    NOTE(review): `UpperCamelCase_` / `load_in_abit` are obfuscation residue;
    the `A__` locals are read back via their lost original attribute names
    in tearDown and the assertions.'''
    def __A ( self ):
        super().setUp()
        # model_name
        A__ : str = 'bigscience/bloom-560m'
        A__ : str = 't5-small'
        # Different types of model
        A__ : Tuple = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map="""auto""" )
        # Sequence classification model
        A__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase_ , device_map="""auto""" )
        # CausalLM model
        A__ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ , device_map="""auto""" )
        # Seq2seq model
        A__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=UpperCamelCase_ , device_map="""auto""" )
    def __A ( self ):
        # Free all four models and reclaim GPU memory.
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()
    def __A ( self ):
        from bitsandbytes.nn import Paramsabit
        # The backbone's last MLP weight must be a bnb 4-bit parameter.
        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class _a (__magic_name__ ):
    '''4-bit loading through the `pipeline()` API: a text-generation pipeline
    built with load_in_4bit must produce one of the accepted outputs.
    NOTE(review): the pipeline is assigned to an obfuscated `A__` local but
    torn down and invoked as `self.pipe` — original attribute name lost.'''
    def __A ( self ):
        super().setUp()
    def __A ( self ):
        # Drop the pipeline and reclaim GPU memory.
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()
    def __A ( self ):
        A__ : str = pipeline(
            """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
        # Real second forward pass
        A__ : int = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class _a (__magic_name__ ):
    '''Multi-GPU 4-bit test: a balanced device map must spread the model over
    GPUs 0 and 1 while still generating one of the accepted outputs.
    NOTE(review): `UpperCamelCase_` / `load_in_abit` and the `A__` locals are
    obfuscation residue; assertions read the lost original names.'''
    def __A ( self ):
        super().setUp()
    def __A ( self ):
        A__ : List[str] = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=UpperCamelCase_ , device_map="""balanced""" )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        A__ : int = self.tokenizer(self.input_text , return_tensors="""pt""" )
        # Second real batch
        A__ : str = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase_ ) , self.EXPECTED_OUTPUTS )
class _a (__magic_name__ ):
    '''Adapter-training test: freeze a 4-bit OPT model, attach LoRA adapters
    to the attention projections, run a backward pass, and verify gradients
    flow only into the adapters (not into embeddings).
    NOTE(review): `UpperCamelCase_` / `load_in_abit` / `floataa` and the `A__`
    assignment targets are obfuscation residue; the original statements
    mutated `param.requires_grad`, `param.data`, `module.q_proj` etc.'''
    def __A ( self ):
        A__ : Union[str, Any] = 'facebook/opt-350m'
        super().setUp()
    def __A ( self ):
        # Adapter training needs bitsandbytes >= 0.37.0; silently skip below.
        if version.parse(importlib.metadata.version("""bitsandbytes""" ) ) < version.parse("""0.37.0""" ):
            return
        # Step 1: freeze all parameters
        A__ : Optional[int] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase_ )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            A__ : List[Any] = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                A__ : int = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(UpperCamelCase_ ) ):
                A__ : Optional[Any] = LoRALayer(module.q_proj , rank=16 )
                A__ : Optional[int] = LoRALayer(module.k_proj , rank=16 )
                A__ : Tuple = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        A__ : Dict = self.tokenizer("""Test batch """ , return_tensors="""pt""" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            A__ : List[str] = model.forward(**UpperCamelCase_ )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
                # Adapter up-projection must have received a non-zero gradient.
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(UpperCamelCase_ , nn.Embedding ):
                # Frozen embeddings must have no gradient at all.
                self.assertTrue(module.weight.grad is None )
class _a (__magic_name__ ):
    '''Variant of the 4-bit suite run against gpt2-xl, whose fp16/4-bit
    memory-footprint ratio differs from bloom-1b7.'''
    UpperCAmelCase__: Tuple = '''gpt2-xl'''
    # Expected fp16 / 4-bit memory-footprint ratio for gpt2-xl.
    UpperCAmelCase__: float = 3.31_91_85_48_54_15_21_87
| 704 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Official OpenAI Whisper checkpoint URLs. The second-to-last path segment of
# each URL is the file's SHA256 checksum, which the download helper verifies.
# Fix: obfuscation assigned this table to `A_` while the conversion code
# references `_MODELS`; both names are exposed below so every caller resolves.
_MODELS = {
    'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
    'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
    'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
    'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
    'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
    'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
    'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
    'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
    'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
    'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
# Obfuscated alias kept so any remaining reference to `A_` still resolves.
A_ : Dict = _MODELS
def UpperCamelCase (lowercase_: Optional[Any] ) -> Optional[int]:
    """Drop the raw Whisper bookkeeping keys from state dict *lowercase_*
    in place; keys that are absent are silently ignored.
    """
    ignore_keys = ["""layers""", """blocks"""]
    for k in ignore_keys:
        # Fix: the obfuscated source popped the dict itself as a key
        # (TypeError: unhashable); each ignore key must be popped instead,
        # with None as default so missing keys do not raise.
        lowercase_.pop(k , None )
# Substring substitutions mapping raw OpenAI Whisper parameter names to
# Hugging Face Whisper names.
# Fix: obfuscation assigned this table to `A_` while the rename function
# references `WHISPER_MAPPING`; both names are exposed below.
WHISPER_MAPPING = {
    'blocks': 'layers',
    'mlp.0': 'fc1',
    'mlp.2': 'fc2',
    'mlp_ln': 'final_layer_norm',
    '.attn.query': '.self_attn.q_proj',
    '.attn.key': '.self_attn.k_proj',
    '.attn.value': '.self_attn.v_proj',
    '.attn_ln': '.self_attn_layer_norm',
    '.attn.out': '.self_attn.out_proj',
    '.cross_attn.query': '.encoder_attn.q_proj',
    '.cross_attn.key': '.encoder_attn.k_proj',
    '.cross_attn.value': '.encoder_attn.v_proj',
    '.cross_attn_ln': '.encoder_attn_layer_norm',
    '.cross_attn.out': '.encoder_attn.out_proj',
    'decoder.ln.': 'decoder.layer_norm.',
    'encoder.ln.': 'encoder.layer_norm.',
    'token_embedding': 'embed_tokens',
    'encoder.positional_embedding': 'encoder.embed_positions.weight',
    'decoder.positional_embedding': 'decoder.embed_positions.weight',
    'ln_post': 'layer_norm',
}
# Obfuscated alias kept so any remaining reference to `A_` still resolves.
A_ : Any = WHISPER_MAPPING
def UpperCamelCase (lowercase_: str ) -> Any:
    """Rename every key of state dict *lowercase_* in place from raw Whisper
    naming to HF naming (several substitutions may apply to one key), logging
    each rename, and return the same dict.
    """
    for key in list(lowercase_.keys() ):
        new_key = key
        # Match each pattern against the ORIGINAL key, but apply the
        # replacement cumulatively to the evolving new name.
        for old, new in WHISPER_MAPPING.items():
            if old in key:
                new_key = new_key.replace(old , new )
        print(f"""{key} -> {new_key}""" )
        # Fix: the obfuscated source popped the dict itself and dropped the
        # result; the entry must be re-inserted under its renamed key.
        lowercase_[new_key] = lowercase_.pop(key )
    return lowercase_
def UpperCamelCase (lowercase_: Tuple ) -> Optional[int]:
A__ , A__ : Any = emb.weight.shape
A__ : str = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
A__ : Union[str, Any] = emb.weight.data
return lin_layer
def UpperCamelCase (url: str , root: str = os.path.join(os.path.expanduser("""~""" ) , """.cache""" , """whisper""" ) ) -> bytes:
    """Download the checkpoint at *url* into directory *root* and return its
    raw bytes, verifying the SHA256 checksum embedded as the second-to-last
    path segment of *url*. A cached file with a matching checksum is reused;
    a mismatched cache is re-downloaded.

    Fix: the obfuscated source declared both parameters with the same name
    (a SyntaxError) and called the nonexistent ``hashlib.shaaaa``; restored
    to distinct url/root parameters and ``hashlib.sha256``. The default for
    *root* keeps the one-argument call site below working — TODO confirm the
    original cache directory.
    """
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    # The expected SHA256 is encoded in the URL itself.
    expected_sha256 = url.split("""/""" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , """rb""" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            # Cache hit: checksum matches, skip the network entirely.
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
    # Stream the checkpoint to disk in 8 KiB chunks with a progress bar.
    with urllib.request.urlopen(url ) as source, open(download_target , """wb""" ) as output:
        with tqdm(
            total=int(source.info().get("""Content-Length""" ) ) , ncols=80 , unit="""iB""" , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , """rb""" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            """Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.""" )
    return model_bytes
def UpperCamelCase (lowercase_: List[Any] , lowercase_: Tuple ) -> Optional[Any]:
    """Convert an OpenAI Whisper checkpoint (a local ``.pt`` path or a model
    name from the URL table above) into a WhisperForConditionalGeneration
    saved under the given dump folder.
    NOTE(review): obfuscation left this block non-runnable as written — the
    two parameters share one name (SyntaxError) and the locals read below
    (checkpoint_path, original_checkpoint, dimensions, state_dict,
    proj_out_weights, tie_embeds, missing, model, ...) were all collapsed
    into ``A__``/``lowercase_``; the original names must be restored.
    """
    if ".pt" not in checkpoint_path:
        # Bare model name: fetch the checkpoint from OpenAI's CDN.
        A__ : Tuple = _download(_MODELS[checkpoint_path] )
    else:
        A__ : Optional[int] = torch.load(lowercase_ , map_location="""cpu""" )
    A__ : str = original_checkpoint["""dims"""]
    A__ : List[Any] = original_checkpoint["""model_state_dict"""]
    # Keep the decoder embedding weights to optionally tie the output head.
    A__ : Optional[Any] = state_dict["""decoder.token_embedding.weight"""]
    remove_ignore_keys_(lowercase_ )
    rename_keys(lowercase_ )
    A__ : List[str] = True
    # FFN width is read off the first decoder layer after renaming.
    A__ : Optional[Any] = state_dict["""decoder.layers.0.fc1.weight"""].shape[0]
    # NOTE(review): decoder_attention_heads reads dimensions["n_text_state"]
    # — that looks like it should be "n_text_head"; confirm against the
    # original conversion script before relying on this config.
    A__ : List[Any] = WhisperConfig(
        vocab_size=dimensions["""n_vocab"""] , encoder_ffn_dim=lowercase_ , decoder_ffn_dim=lowercase_ , num_mel_bins=dimensions["""n_mels"""] , d_model=dimensions["""n_audio_state"""] , max_target_positions=dimensions["""n_text_ctx"""] , encoder_layers=dimensions["""n_audio_layer"""] , encoder_attention_heads=dimensions["""n_audio_head"""] , decoder_layers=dimensions["""n_text_layer"""] , decoder_attention_heads=dimensions["""n_text_state"""] , max_source_positions=dimensions["""n_audio_ctx"""] , )
    A__ : Optional[Any] = WhisperForConditionalGeneration(lowercase_ )
    A__ , A__ : List[Any] = model.model.load_state_dict(lowercase_ , strict=lowercase_ )
    # Only the (recomputable) positional-embedding weights may be missing.
    if len(lowercase_ ) > 0 and not set(lowercase_ ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        # Tie the projection head to the decoder embeddings.
        A__ : Any = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        A__ : str = proj_out_weights
    model.save_pretrained(lowercase_ )
if __name__ == "__main__":
    # NOTE(review): the parser/args objects are assigned to obfuscated `A_`
    # names but used below as `parser` and `args` — original names lost.
    A_ : Any = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    A_ : Tuple = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 64 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
# Make torch ops deterministic so the pipeline's pixel-level assertions below
# are reproducible across runs.
enable_full_determinism()
@skip_mps
class _a (lowercase__ , unittest.TestCase ):
    '''Fast CPU tests for VideoToVideoSDPipeline with tiny dummy components:
    checks output shape, an expected pixel slice, and the xformers path.
    NOTE(review): `__lowerCamelCase` is obfuscation residue standing in for
    lost argument values (booleans, device strings, seeds, configs); the
    original values must be restored for these tests to run.'''
    UpperCAmelCase__: Optional[Any] = VideoToVideoSDPipeline
    UpperCAmelCase__: str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''}
    UpperCAmelCase__: Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''}
    UpperCAmelCase__: List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
    UpperCAmelCase__: Optional[int] = False
    # No `output_type`.
    UpperCAmelCase__: int = frozenset(
        [
            '''num_inference_steps''',
            '''generator''',
            '''latents''',
            '''return_dict''',
            '''callback''',
            '''callback_steps''',
        ] )
    def __A ( self ):
        # Build a tiny 3D UNet / DDIM / VAE / CLIP stack, each seeded so the
        # pixel-slice assertion below is deterministic.
        torch.manual_seed(0 )
        A__ : str = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
        A__ : Optional[Any] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        A__ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        A__ : Optional[int] = CLIPTextModel(__lowerCamelCase )
        A__ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        A__ : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def __A ( self , A__ , A__=0 ):
        # One seeded dummy video plus fixed pipeline call arguments.
        A__ : Optional[Any] = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
        if str(__lowerCamelCase ).startswith("""mps""" ):
            A__ : str = torch.manual_seed(__lowerCamelCase )
        else:
            A__ : Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
        A__ : List[str] = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def __A ( self ):
        A__ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
        A__ : Any = self.get_dummy_components()
        A__ : List[Any] = VideoToVideoSDPipeline(**__lowerCamelCase )
        A__ : Union[str, Any] = sd_pipe.to(__lowerCamelCase )
        sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
        A__ : int = self.get_dummy_inputs(__lowerCamelCase )
        A__ : int = "np"
        A__ : Tuple = sd_pipe(**__lowerCamelCase ).frames
        # Compare a 3x3 corner slice of the first frame against golden values.
        A__ : Union[str, Any] = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        A__ : int = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def __A ( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCamelCase , expected_max_diff=5e-3 )
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        pass
    @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def __A ( self ):
        pass
    @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def __A ( self ):
        pass
    def __A ( self ):
        # Delegate to the mixin's progress-bar test unchanged.
        return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
    '''Slow GPU integration test: run the real zeroscope_v2_XL checkpoint on a
    random 10-frame video and compare five output pixels to golden values.
    NOTE(review): `__lowerCamelCase` is obfuscation residue standing in for
    the lost prompt/video/generator argument names; the `A__` locals are read
    back as pipe/video/video_frames/expected_array.'''
    def __A ( self ):
        A__ : Any = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
        pipe.enable_model_cpu_offload()
        # 10 frames
        A__ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        A__ : Optional[int] = torch.randn((1, 10, 3, 1024, 576) , generator=__lowerCamelCase )
        A__ : Dict = video.to("""cuda""" )
        A__ : Union[str, Any] = "Spiderman is surfing"
        A__ : Optional[Any] = pipe(__lowerCamelCase , video=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=3 , output_type="""pt""" ).frames
        A__ : Dict = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 705 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make torch ops deterministic so the text-to-video tests below are reproducible.
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = TextToVideoSDPipeline
UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase__: Optional[int] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __A ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A__ : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A__ : Union[str, Any] = CLIPTextModel(A__ )
A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : Tuple = torch.manual_seed(A__ )
else:
A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def __A ( self ):
A__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Union[str, Any] = self.get_dummy_components()
A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
A__ : int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A__ : int = self.get_dummy_inputs(A__ )
A__ : int = """np"""
A__ : Any = sd_pipe(**A__ ).frames
A__ : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def __A ( self ):
pass
def __A ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
A__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A__ : int = pipe.to("""cuda""" )
A__ : Optional[Any] = """Spiderman is surfing"""
A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
A__ : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __A ( self ):
A__ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : List[str] = pipe.to("""cuda""" )
A__ : Dict = """Spiderman is surfing"""
A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
A__ : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 64 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _a (__lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: int = UnCLIPImageVariationPipeline
UpperCAmelCase__: Optional[int] = IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''guidance_scale'''}
UpperCAmelCase__: Union[str, Any] = IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__: List[str] = [
'''generator''',
'''return_dict''',
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
UpperCAmelCase__: int = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
A__ : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
A__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCAmelCase_ )
@property
def __A ( self ):
torch.manual_seed(0 )
A__ : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(UpperCAmelCase_ )
@property
def __A ( self ):
torch.manual_seed(0 )
A__ : Dict = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
A__ : int = UnCLIPTextProjModel(**UpperCAmelCase_ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
A__ : str = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
A__ : Optional[Any] = UNetaDConditionModel(**UpperCAmelCase_ )
return model
@property
def __A ( self ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __A ( self ):
torch.manual_seed(0 )
A__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __A ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
A__ : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __A ( self ):
A__ : Optional[int] = self.dummy_decoder
A__ : Dict = self.dummy_text_proj
A__ : int = self.dummy_text_encoder
A__ : Tuple = self.dummy_tokenizer
A__ : Optional[Any] = self.dummy_super_res_first
A__ : List[str] = self.dummy_super_res_last
A__ : Optional[Any] = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
A__ : Tuple = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
A__ : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
A__ : Optional[Any] = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __A ( self , A__ , A__=0 , A__=True ):
A__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
if str(UpperCAmelCase_ ).startswith("""mps""" ):
A__ : Optional[int] = torch.manual_seed(UpperCAmelCase_ )
else:
A__ : str = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
if pil_image:
A__ : List[Any] = input_image * 0.5 + 0.5
A__ : str = input_image.clamp(0 , 1 )
A__ : Optional[Any] = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A__ : int = DiffusionPipeline.numpy_to_pil(UpperCAmelCase_ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __A ( self ):
A__ : Any = 'cpu'
A__ : Optional[Any] = self.get_dummy_components()
A__ : Dict = self.pipeline_class(**UpperCAmelCase_ )
A__ : int = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
A__ : Optional[int] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
A__ : Any = pipe(**UpperCAmelCase_ )
A__ : int = output.images
A__ : Dict = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
A__ : List[str] = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
A__ : int = image[0, -3:, -3:, -1]
A__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ : int = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
A__ : Tuple = 'cpu'
A__ : Dict = self.get_dummy_components()
A__ : Any = self.pipeline_class(**UpperCAmelCase_ )
A__ : Any = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
A__ : List[str] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
A__ : Union[str, Any] = pipe(**UpperCAmelCase_ )
A__ : Optional[Any] = output.images
A__ : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
A__ : Dict = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
A__ : Union[str, Any] = image[0, -3:, -3:, -1]
A__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ : Dict = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
A__ : int = 'cpu'
A__ : int = self.get_dummy_components()
A__ : Tuple = self.pipeline_class(**UpperCAmelCase_ )
A__ : Dict = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
A__ : Dict = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
A__ : Dict = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
A__ : int = pipe(**UpperCAmelCase_ )
A__ : Optional[int] = output.images
A__ : Tuple = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
A__ : List[str] = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
A__ : str = pipe(
**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0]
A__ : str = image[0, -3:, -3:, -1]
A__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
A__ : Any = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
A__ : List[Any] = torch.device("""cpu""" )
class _a :
'''simple docstring'''
UpperCAmelCase__: Any = 1
A__ : List[Any] = self.get_dummy_components()
A__ : Dict = self.pipeline_class(**UpperCAmelCase_ )
A__ : str = pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
A__ : Dict = torch.Generator(device=UpperCAmelCase_ ).manual_seed(0 )
A__ : Union[str, Any] = pipe.decoder.dtype
A__ : Optional[int] = 1
A__ : Optional[Any] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
A__ : Optional[int] = pipe.prepare_latents(
UpperCAmelCase_ , dtype=UpperCAmelCase_ , device=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , scheduler=DummyScheduler() )
A__ : Any = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
A__ : str = pipe.prepare_latents(
UpperCAmelCase_ , dtype=UpperCAmelCase_ , device=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , scheduler=DummyScheduler() )
A__ : List[str] = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
A__ : Union[str, Any] = pipe(
**UpperCAmelCase_ , decoder_latents=UpperCAmelCase_ , super_res_latents=UpperCAmelCase_ ).images
A__ : int = self.get_dummy_inputs(UpperCAmelCase_ , pil_image=UpperCAmelCase_ )
# Don't pass image, instead pass embedding
A__ : Dict = pipeline_inputs.pop("""image""" )
A__ : int = pipe.image_encoder(UpperCAmelCase_ ).image_embeds
A__ : int = pipe(
**UpperCAmelCase_ , decoder_latents=UpperCAmelCase_ , super_res_latents=UpperCAmelCase_ , image_embeddings=UpperCAmelCase_ , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def __A ( self ):
A__ : Optional[int] = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
A__ : Optional[Any] = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCAmelCase_ , expected_max_diff=UpperCAmelCase_ )
@skip_mps
def __A ( self ):
A__ : List[Any] = torch_device == 'cpu'
A__ : Dict = True
A__ : Optional[int] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=UpperCAmelCase_ , relax_max_difference=UpperCAmelCase_ , additional_params_copy_to_batched_inputs=UpperCAmelCase_ , )
def __A ( self ):
A__ : List[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
A__ : Tuple = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=UpperCAmelCase_ , additional_params_copy_to_batched_inputs=UpperCAmelCase_ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=UpperCAmelCase_ )
@skip_mps
def __A ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __A ( self ):
return super().test_save_load_local()
@skip_mps
def __A ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
A__ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""" )
A__ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/unclip/karlo_v1_alpha_cat_variation_fp16.npy""" )
A__ : Any = UnCLIPImageVariationPipeline.from_pretrained(
"""kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.floataa )
A__ : Dict = pipeline.to(UpperCAmelCase_ )
pipeline.set_progress_bar_config(disable=UpperCAmelCase_ )
A__ : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[int] = pipeline(
UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type="""np""" , )
A__ : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(UpperCAmelCase_ , UpperCAmelCase_ , 15 )
| 706 |
def UpperCamelCase (lowercase_: int ) -> int:
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError("""Input value must be an 'int' type""" )
A__ : int = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : List[str] = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
A_ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 707 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase (lowercase_: np.ndarray , lowercase_: np.ndarray , lowercase_: np.ndarray , lowercase_: int , lowercase_: int ) -> np.ndarray:
A__ : Any = cva.getAffineTransform(lowercase_ , lowercase_ )
return cva.warpAffine(lowercase_ , lowercase_ , (rows, cols) )
if __name__ == "__main__":
# read original image
A_ : List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
A_ : List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
A_ , A_ : Optional[Any] = gray_img.shape
# set different points to rotate image
A_ : str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
A_ : Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
A_ : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
A_ : Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
A_ : Dict = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
A_ : Union[str, Any] = plt.figure(1)
A_ : Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 64 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A_ = logging.get_logger(__name__)
def UpperCamelCase (lowercase_: List[Any] , lowercase_: int , lowercase_: int ) -> Optional[int]:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def UpperCamelCase (lowercase_: np.ndarray , lowercase_: Optional[str] , lowercase_: Optional[str] ) -> Optional[Any]:
A__ : int = to_pil_image(__A )
A__ : Tuple = pil_image.size
A__ : Optional[Any] = pytesseract.image_to_data(__A , lang=__A , output_type="""dict""" , config=__A )
A__ : Optional[Any] = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
A__ : Dict = [idx for idx, word in enumerate(__A ) if not word.strip()]
A__ : str = [word for idx, word in enumerate(__A ) if idx not in irrelevant_indices]
A__ : Optional[int] = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
A__ : List[Any] = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
A__ : str = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
A__ : int = [coord for idx, coord in enumerate(__A ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
A__ : Tuple = []
for x, y, w, h in zip(__A , __A , __A , __A ):
A__ : str = [x, y, x + w, y + h]
actual_boxes.append(__A )
# finally, normalize the bounding boxes
A__ : List[str] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(__A , __A , __A ) )
assert len(__A ) == len(__A ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class _a (__A ):
'''simple docstring'''
UpperCAmelCase__: int = ['''pixel_values''']
def __init__( self , A__ = True , A__ = None , A__ = PILImageResampling.BILINEAR , A__ = True , A__ = 1 / 255 , A__ = True , A__ = None , A__ = None , A__ = True , A__ = None , A__ = "" , **A__ , ):
super().__init__(**A__ )
A__ : Tuple = size if size is not None else {'''height''': 224, '''width''': 224}
A__ : Optional[Any] = get_size_dict(A__ )
A__ : Optional[Any] = do_resize
A__ : List[Any] = size
A__ : List[str] = resample
A__ : Dict = do_rescale
A__ : str = rescale_value
A__ : Optional[int] = do_normalize
A__ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
A__ : List[Any] = apply_ocr
A__ : Union[str, Any] = ocr_lang
A__ : str = tesseract_config
def __A ( self , A__ , A__ , A__ = PILImageResampling.BILINEAR , A__ = None , **A__ , ):
A__ : Tuple = get_size_dict(A__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}""" )
A__ : Optional[Any] = (size['''height'''], size['''width'''])
return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )
def __A ( self , A__ , A__ , A__ = None , **A__ , ):
return rescale(A__ , scale=A__ , data_format=A__ , **A__ )
def __A ( self , A__ , A__ , A__ , A__ = None , **A__ , ):
return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )
def __A ( self , A__ , A__ = None , A__ = None , A__=None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = None , A__ = ChannelDimension.FIRST , **A__ , ):
A__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
A__ : Tuple = size if size is not None else self.size
A__ : Optional[int] = get_size_dict(A__ )
A__ : List[str] = resample if resample is not None else self.resample
A__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
A__ : List[Any] = image_mean if image_mean is not None else self.image_mean
A__ : Optional[int] = image_std if image_std is not None else self.image_std
A__ : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
A__ : Any = ocr_lang if ocr_lang is not None else self.ocr_lang
A__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
A__ : str = make_list_of_images(A__ )
if not valid_images(A__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )
# All transformations expect numpy arrays.
A__ : Tuple = [to_numpy_array(A__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , """pytesseract""" )
A__ : int = []
A__ : Tuple = []
for image in images:
A__ : Dict = apply_tesseract(A__ , A__ , A__ )
words_batch.append(A__ )
boxes_batch.append(A__ )
if do_resize:
A__ : int = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
if do_rescale:
A__ : Tuple = [self.rescale(image=A__ , scale=A__ ) for image in images]
if do_normalize:
A__ : str = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
A__ : Optional[Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
A__ : Dict = BatchFeature(data={"""pixel_values""": images} , tensor_type=A__ )
if apply_ocr:
A__ : int = words_batch
A__ : List[str] = boxes_batch
return data
| 708 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self , A__ ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
A__ : str = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(A__ )
def __A ( self ):
A__ : Dict = """sshleifer/tiny-gpt2"""
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ )
A__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Dict = """sgugger/tiny-distilbert-classification"""
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , only_pretrain_model=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Any = """sshleifer/tiny-gpt2"""
A__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , torchscript=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Tuple = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , fpaa=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : str = PyTorchBenchmark(A__ )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[Any] = """sshleifer/tiny-gpt2"""
A__ : Tuple = AutoConfig.from_pretrained(A__ )
# set architectures equal to `None`
A__ : List[Any] = None
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[str] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Any = PyTorchBenchmark(A__ )
A__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
A__ : Optional[int] = AutoConfig.from_pretrained(A__ )
A__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : List[str] = """sshleifer/tinier_bart"""
A__ : List[str] = AutoConfig.from_pretrained(A__ )
A__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : Union[str, Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
A__ : Union[str, Any] = AutoConfig.from_pretrained(A__ )
A__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : int = PyTorchBenchmark(A__ , configs=[config] )
A__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : Dict = """sshleifer/tinier_bart"""
A__ : int = AutoConfig.from_pretrained(A__ )
A__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A__ , )
A__ : List[Any] = PyTorchBenchmark(A__ , configs=[config] )
A__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __A ( self ):
A__ : int = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , save_to_csv=A__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A__ , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(A__ , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(A__ , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(A__ , """train_time.csv""" ) , env_info_csv_file=os.path.join(A__ , """env.csv""" ) , multi_process=A__ , )
A__ : Optional[Any] = PyTorchBenchmark(A__ )
benchmark.run()
self.assertTrue(Path(os.path.join(A__ , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(A__ , """env.csv""" ) ).exists() )
def __A ( self ):
A__ : Optional[int] = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(A__ ):
self.assertTrue(hasattr(A__ , """sequential""" ) )
self.assertTrue(hasattr(A__ , """cumulative""" ) )
self.assertTrue(hasattr(A__ , """current""" ) )
self.assertTrue(hasattr(A__ , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A__ , inference=A__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A__ , """log.txt""" ) , log_print=A__ , trace_memory_line_by_line=A__ , multi_process=A__ , )
A__ : Dict = PyTorchBenchmark(A__ )
A__ : str = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A__ , """log.txt""" ) ).exists() )
| 64 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 16_00, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 16_00, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_lowerCAmelCase , )
assert hasattr(self , """env""" )
def __A ( self , A__ ):
# configuration for running training on smdistributed Model Parallel
A__ : Union[str, Any] = {
"""enabled""": True,
"""processes_per_host""": 8,
}
A__ : Any = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
A__ : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
A__ : int = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=_lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=_lowerCAmelCase , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=_lowerCAmelCase , py_version="""py36""" , )
def __A ( self , A__ ):
TrainingJobAnalytics(_lowerCAmelCase ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __A ( self , A__ ):
# create estimator
A__ : int = self.create_estimator(_lowerCAmelCase )
# run training
estimator.fit()
# result dataframe
A__ : Dict = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
A__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
A__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
A__ : Tuple = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _lowerCAmelCase )
| 709 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A_ : Optional[int] = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def UpperCamelCase (lowercase_: List[str] ) -> Any:
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def UpperCamelCase (lowercase_: Optional[int] ) -> Optional[Any]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(lowercase_ )
def UpperCamelCase (lowercase_: List[str] ) -> Optional[Any]:
from transformers.testing_utils import pytest_terminal_summary_main
A__ : List[Any] = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(lowercase_ , id=lowercase_ )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: int ) -> List[str]:
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
A__ : Tuple = 0
# Doctest custom flag to ignore output.
A_ : Tuple = doctest.register_optionflag('IGNORE_RESULT')
A_ : Dict = doctest.OutputChecker
class _a (__magic_name__ ):
'''simple docstring'''
def __A ( self , A__ , A__ , A__ ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , A__ , A__ , A__ )
A_ : str = CustomOutputChecker
A_ : Dict = HfDoctestModule
A_ : Optional[int] = HfDocTestParser
| 64 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
A_ : Tuple = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
A_ : Optional[Any] = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
A_ : Optional[Any] = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
A_ : Optional[Any] = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
A_ : List[str] = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
A_ : str = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def UpperCamelCase (lowercase_: Optional[int] ) -> Tuple:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
def UpperCamelCase (lowercase_: str , lowercase_: Tuple , lowercase_: int , lowercase_: List[Any] , lowercase_: int=False ) -> int:
A__ : Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
A__ : Any = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
A__ : Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
A__ : List[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
A__ : Dict = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
A__ : List[str] = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
A__ : List[str] = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
A__ : Optional[int] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
A__ : Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
A__ : Union[str, Any] = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
A__ : List[str] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
A__ : int = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def UpperCamelCase (lowercase_: Tuple , lowercase_: Tuple , lowercase_: str , lowercase_: List[Any] , lowercase_: Dict=None ) -> Optional[Any]:
A__ : int = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
A__ : List[str] = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
A__ : Any = checkpoint[f"""{old_prefix}.norm.weight"""]
A__ : int = checkpoint[f"""{old_prefix}.norm.bias"""]
A__ : Union[str, Any] = weight_q.squeeze(-1 ).squeeze(-1 )
A__ : int = bias_q.squeeze(-1 ).squeeze(-1 )
A__ : List[Any] = weight_k.squeeze(-1 ).squeeze(-1 )
A__ : Any = bias_k.squeeze(-1 ).squeeze(-1 )
A__ : List[str] = weight_v.squeeze(-1 ).squeeze(-1 )
A__ : Any = bias_v.squeeze(-1 ).squeeze(-1 )
A__ : Dict = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
A__ : List[str] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def UpperCamelCase (lowercase_: Optional[int] , lowercase_: int ) -> Dict:
A__ : int = torch.load(lowerCamelCase__ , map_location="""cpu""" )
A__ : Union[str, Any] = {}
A__ : Tuple = checkpoint["time_embed.0.weight"]
A__ : Any = checkpoint["time_embed.0.bias"]
A__ : Union[str, Any] = checkpoint["time_embed.2.weight"]
A__ : List[Any] = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
A__ : str = checkpoint["label_emb.weight"]
A__ : Union[str, Any] = checkpoint["input_blocks.0.0.weight"]
A__ : Optional[int] = checkpoint["input_blocks.0.0.bias"]
A__ : Tuple = unet_config["down_block_types"]
A__ : Optional[Any] = unet_config["layers_per_block"]
A__ : Optional[Any] = unet_config["attention_head_dim"]
A__ : int = unet_config["block_out_channels"]
A__ : int = 1
A__ : List[Any] = channels_list[0]
for i, layer_type in enumerate(lowerCamelCase__ ):
A__ : Tuple = channels_list[i]
A__ : Tuple = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowerCamelCase__ ):
A__ : Dict = f"""down_blocks.{i}.resnets.{j}"""
A__ : Dict = f"""input_blocks.{current_layer}.0"""
A__ : Optional[Any] = True if j == 0 and downsample_block_has_skip else False
A__ : Dict = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowerCamelCase__ ):
A__ : str = f"""down_blocks.{i}.resnets.{j}"""
A__ : Any = f"""input_blocks.{current_layer}.0"""
A__ : str = True if j == 0 and downsample_block_has_skip else False
A__ : Union[str, Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ )
A__ : List[str] = f"""down_blocks.{i}.attentions.{j}"""
A__ : Dict = f"""input_blocks.{current_layer}.1"""
A__ : Union[str, Any] = convert_attention(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
current_layer += 1
if i != len(lowerCamelCase__ ) - 1:
A__ : Dict = f"""down_blocks.{i}.downsamplers.0"""
A__ : int = f"""input_blocks.{current_layer}.0"""
A__ : Union[str, Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
current_layer += 1
A__ : Union[str, Any] = current_channels
# hardcoded the mid-block for now
A__ : str = "mid_block.resnets.0"
A__ : Optional[Any] = "middle_block.0"
A__ : Optional[int] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A__ : str = "mid_block.attentions.0"
A__ : int = "middle_block.1"
A__ : Union[str, Any] = convert_attention(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A__ : List[str] = "mid_block.resnets.1"
A__ : int = "middle_block.2"
A__ : Any = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A__ : Dict = 0
A__ : List[str] = unet_config["up_block_types"]
for i, layer_type in enumerate(lowerCamelCase__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
A__ : Union[str, Any] = f"""up_blocks.{i}.resnets.{j}"""
A__ : Optional[int] = f"""output_blocks.{current_layer}.0"""
A__ : Union[str, Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ )
current_layer += 1
if i != len(lowerCamelCase__ ) - 1:
A__ : List[Any] = f"""up_blocks.{i}.upsamplers.0"""
A__ : List[Any] = f"""output_blocks.{current_layer-1}.1"""
A__ : List[Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
A__ : int = f"""up_blocks.{i}.resnets.{j}"""
A__ : Union[str, Any] = f"""output_blocks.{current_layer}.0"""
A__ : Dict = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ )
A__ : Union[str, Any] = f"""up_blocks.{i}.attentions.{j}"""
A__ : str = f"""output_blocks.{current_layer}.1"""
A__ : Tuple = convert_attention(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
current_layer += 1
if i != len(lowerCamelCase__ ) - 1:
A__ : Any = f"""up_blocks.{i}.upsamplers.0"""
A__ : List[Any] = f"""output_blocks.{current_layer-1}.2"""
A__ : Optional[int] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
A__ : Union[str, Any] = checkpoint["out.0.weight"]
A__ : Optional[int] = checkpoint["out.0.bias"]
A__ : Optional[int] = checkpoint["out.2.weight"]
A__ : List[Any] = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
A_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
A_ : Any = parser.parse_args()
A_ : Optional[Any] = strabool(args.class_cond)
A_ : Any = os.path.basename(args.unet_path)
print(f'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
A_ : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A_ : str = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
A_ : List[Any] = TEST_UNET_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
A_ : List[str] = None
A_ : str = con_pt_to_diffuser(args.unet_path, unet_config)
A_ : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
A_ : Union[str, Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
A_ : List[Any] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A_ : List[str] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''')
A_ : Optional[int] = CMStochasticIterativeScheduler(**scheduler_config)
A_ : Optional[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 710 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _a :
'''simple docstring'''
UpperCAmelCase__: List[Any] = PegasusConfig
UpperCAmelCase__: Optional[int] = {}
UpperCAmelCase__: List[str] = '''gelu'''
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__=0.1 , A__=0.1 , A__=40 , A__=2 , A__=1 , A__=0 , ):
A__ : Dict = parent
A__ : Dict = batch_size
A__ : Any = seq_length
A__ : Optional[Any] = is_training
A__ : int = use_labels
A__ : Any = vocab_size
A__ : Union[str, Any] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Tuple = num_attention_heads
A__ : List[Any] = intermediate_size
A__ : Union[str, Any] = hidden_dropout_prob
A__ : Optional[Any] = attention_probs_dropout_prob
A__ : List[Any] = max_position_embeddings
A__ : Any = eos_token_id
A__ : List[Any] = pad_token_id
A__ : List[Any] = bos_token_id
def __A ( self ):
A__ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
A__ : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
A__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
A__ : str = prepare_pegasus_inputs_dict(A__ , A__ , A__ )
return config, inputs_dict
def __A ( self , A__ , A__ ):
A__ : int = TFPegasusModel(config=A__ ).get_decoder()
A__ : List[Any] = inputs_dict["""input_ids"""]
A__ : Any = input_ids[:1, :]
A__ : Optional[Any] = inputs_dict["""attention_mask"""][:1, :]
A__ : Optional[int] = inputs_dict["""head_mask"""]
A__ : Any = 1
# first forward pass
A__ : Tuple = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
A__ , A__ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
A__ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
A__ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
A__ : Optional[Any] = model(A__ , attention_mask=A__ )[0]
A__ : Any = model(A__ , attention_mask=A__ , past_key_values=A__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
A__ : int = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
A__ : Any = output_from_no_past[:, -3:, random_slice_idx]
A__ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(A__ , A__ , rtol=1e-3 )
def UpperCamelCase (lowercase_: Union[str, Any] , lowercase_: Dict , lowercase_: List[Any] , lowercase_: Dict=None , lowercase_: int=None , lowercase_: List[Any]=None , lowercase_: List[Any]=None , lowercase_: str=None , ) -> int:
if attention_mask is None:
A__ : List[str] = tf.cast(tf.math.not_equal(lowercase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
A__ : Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
A__ : Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
A__ : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
A__ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _a (__magic_name__ , __magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__: Tuple = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__: int = True
UpperCAmelCase__: Union[str, Any] = False
UpperCAmelCase__: List[str] = False
def __A ( self ):
A__ : Optional[Any] = TFPegasusModelTester(self )
A__ : Tuple = ConfigTester(self , config_class=A__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
A__ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*A__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _a (unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Optional[int] = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCAmelCase__: Any = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCAmelCase__: List[str] = '''google/pegasus-xsum'''
@cached_property
def __A ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __A ( self ):
A__ : int = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __A ( self , **A__ ):
A__ : str = self.translate_src_text(**A__ )
assert self.expected_text == generated_words
def __A ( self , **A__ ):
A__ : List[str] = self.tokenizer(self.src_text , **A__ , padding=A__ , return_tensors="""tf""" )
A__ : Optional[int] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A__ , )
A__ : Dict = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A__ )
return generated_words
@slow
def __A ( self ):
self._assert_generated_batch_equal_expected()
| 64 | 0 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def UpperCamelCase (lowercase_: Any ) -> Any:
    """Return ``lowercase_`` unchanged if it is already iterable, otherwise
    duplicate it into a 2-tuple (a "to-2-tuple" helper for image/patch sizes).
    """
    # Bug fix: the body referenced undefined names (`_lowerCAmelCase`, `x`),
    # which raised NameError on every call; use the actual parameter instead.
    if isinstance(lowercase_ , collections.abc.Iterable ):
        return lowercase_
    return (lowercase_, lowercase_)
@require_tf
class _a :
'''simple docstring'''
def __A ( self , A__ , A__ ):
pass
    def __A ( self ):
        """No-op hook; presumably overridden by concrete test subclasses — TODO confirm."""
        pass
    def __A ( self ):
        """No-op hook; presumably overridden by concrete test subclasses — TODO confirm."""
        pass
def __A ( self , A__ , A__ , A__ , A__ , A__=None , **A__ ):
A__ : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(__A , __A )
A__ : Optional[int] = TFVisionTextDualEncoderModel(__A )
A__ : Union[str, Any] = model(input_ids=__A , pixel_values=__A , attention_mask=__A )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def __A ( self , A__ , A__ , A__ , A__ , A__=None , **A__ ):
A__ : Optional[int] = self.get_vision_text_model(__A , __A )
A__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=__A , text_model=__A )
A__ : Any = model(input_ids=__A , pixel_values=__A , attention_mask=__A )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __A ( self , A__ , A__ , A__ , A__ , A__=None , **A__ ):
A__ : Union[str, Any] = self.get_vision_text_model(__A , __A )
A__ : Dict = {"vision_model": vision_model, "text_model": text_model}
A__ : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__A )
A__ : List[str] = model(input_ids=__A , pixel_values=__A , attention_mask=__A )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __A ( self , A__ , A__ , A__ , A__ , A__=None , **A__ ):
A__ : List[str] = self.get_vision_text_model(__A , __A )
A__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=__A , text_model=__A )
A__ : Optional[int] = model(input_ids=__A , pixel_values=__A , attention_mask=__A )
A__ : List[str] = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
A__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(__A )
A__ : Any = model(input_ids=__A , pixel_values=__A , attention_mask=__A )
A__ : Optional[int] = after_output[0].numpy()
A__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A , 1e-5 )
def __A ( self , A__ , A__ , A__ , A__ , A__=None , **A__ ):
A__ : int = self.get_vision_text_model(__A , __A )
A__ : Any = TFVisionTextDualEncoderModel(vision_model=__A , text_model=__A )
A__ : str = model(
input_ids=__A , pixel_values=__A , attention_mask=__A , output_attentions=__A )
A__ : Any = output.vision_model_output.attentions
self.assertEqual(len(__A ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A__ : Any = to_atuple(vision_model.config.image_size )
A__ : Any = to_atuple(vision_model.config.patch_size )
A__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A__ : Dict = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A__ : List[Any] = output.text_model_output.attentions
self.assertEqual(len(__A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __A ( self , A__ , A__ , A__ ):
A__ : List[Any] = np.abs((a - b) ).max()
self.assertLessEqual(__A , __A , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
def __A ( self ):
A__ : Any = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**__A )
def __A ( self ):
A__ : str = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__A )
def __A ( self ):
A__ : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__A )
def __A ( self ):
A__ : Optional[Any] = self.prepare_config_and_inputs()
self.check_save_load(**__A )
def __A ( self ):
A__ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__A )
@slow
def __A ( self ):
A__ : Any = self.get_pretrained_model_and_inputs()
A__ : Tuple = model_a(**__A )
A__ : int = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__A )
A__ : int = TFVisionTextDualEncoderModel.from_pretrained(__A )
A__ : List[str] = model_a(**__A )
A__ : Any = after_outputs[0].numpy()
A__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__A , 1e-5 )
@require_tf
class _a (__magic_name__ , unittest.TestCase ):
    '''ViT vision tower + BERT text tower variant of the dual-encoder tests.'''
    # NOTE(review): bodies reference names (`batch_size`, `model`, `__A`, ...)
    # that the throwaway `A__` bindings never define — renaming artifact;
    # confirm against the upstream test suite.
    def __A ( self ):
        # Build a tiny pretrained pair plus random inputs for the slow test.
        A__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
        A__ : List[str] = 13
        A__ : List[str] = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        A__ : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        A__ : Tuple = random_attention_mask([batch_size, 4] )
        A__ : Optional[Any] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def __A ( self , A__ , A__ ):
        A__ : List[Any] = TFViTModel(__A , name="""vision_model""" )
        A__ : str = TFBertModel(__A , name="""text_model""" )
        return vision_model, text_model
    def __A ( self ):
        # Combine the two model testers' configs/inputs into one kwargs dict.
        A__ : Dict = TFViTModelTester(self )
        A__ : int = TFBertModelTester(self )
        A__ : Tuple = vit_model_tester.prepare_config_and_inputs()
        A__ : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
        A__ : str = vision_config_and_inputs
        (
            A__
        ) : Optional[int] = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class _a (__magic_name__ , unittest.TestCase ):
    '''DeiT vision tower + RoBERTa text tower variant of the dual-encoder tests.'''
    # NOTE(review): same `A__`/`__A` renaming artifacts as the mixin above.
    def __A ( self ):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        A__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
        A__ : int = 13
        A__ : Tuple = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        A__ : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        A__ : Optional[int] = random_attention_mask([batch_size, 4] )
        A__ : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def __A ( self , A__ , A__ , A__ , A__ , A__=None , **A__ ):
        # Overridden attention check: DeiT prepends two special tokens.
        A__ : Dict = self.get_vision_text_model(__A , __A )
        A__ : int = TFVisionTextDualEncoderModel(vision_model=__A , text_model=__A )
        A__ : List[Any] = model(
            input_ids=__A , pixel_values=__A , attention_mask=__A , output_attentions=__A )
        A__ : int = output.vision_model_output.attentions
        self.assertEqual(len(__A ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        A__ : Union[str, Any] = to_atuple(vision_model.config.image_size )
        A__ : Tuple = to_atuple(vision_model.config.patch_size )
        A__ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        A__ : Dict = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        A__ : Any = output.text_model_output.attentions
        self.assertEqual(len(__A ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def __A ( self , A__ , A__ ):
        A__ : Union[str, Any] = TFDeiTModel(__A , name="""vision_model""" )
        A__ : str = TFRobertaModel(__A , name="""text_model""" )
        return vision_model, text_model
    def __A ( self ):
        # Combine the two model testers' configs/inputs into one kwargs dict.
        A__ : Tuple = TFDeiTModelTester(self )
        A__ : Union[str, Any] = TFRobertaModelTester(self )
        A__ : str = vit_model_tester.prepare_config_and_inputs()
        A__ : Any = bert_model_tester.prepare_config_and_inputs()
        A__ : List[Any] = vision_config_and_inputs
        (
            A__
        ) : Dict = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class _a (__magic_name__ , unittest.TestCase ):
    '''CLIP vision tower + BERT text tower variant of the dual-encoder tests.'''
    # NOTE(review): same `A__`/`__A` renaming artifacts as the mixin above.
    def __A ( self ):
        # Build a tiny pretrained pair plus random inputs for the slow test.
        A__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
        A__ : List[Any] = 13
        A__ : List[Any] = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        A__ : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        A__ : List[Any] = random_attention_mask([batch_size, 4] )
        A__ : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def __A ( self , A__ , A__ ):
        A__ : Tuple = TFCLIPVisionModel(__A , name="""vision_model""" )
        A__ : int = TFBertModel(__A , name="""text_model""" )
        return vision_model, text_model
    def __A ( self ):
        # Combine the two model testers' configs/inputs into one kwargs dict.
        A__ : Union[str, Any] = TFCLIPVisionModelTester(self )
        A__ : Dict = TFBertModelTester(self )
        A__ : List[str] = clip_model_tester.prepare_config_and_inputs()
        A__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
        A__ : Optional[Any] = vision_config_and_inputs
        (
            A__
        ) : int = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class _a (unittest.TestCase ):
    '''Slow integration test against the public clip-italian checkpoint.'''
    @slow
    def __A ( self ):
        # NOTE(review): `__A` is passed where upstream passes concrete values
        # (from_pt flag, image, padding flag, inputs) — renaming artifact.
        A__ : Dict = TFVisionTextDualEncoderModel.from_pretrained(
            """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=__A )
        A__ : List[Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
        A__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        A__ : str = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=__A , padding=__A , return_tensors="""np""" )
        A__ : int = model(**__A )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        A__ : Dict = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __A , atol=1e-3 ) )
class _a :
    '''Compute the Levenshtein edit distance between two words.

    Exposes a memoised top-down solver (`min_dist_top_down`) and an iterative
    bottom-up solver (`min_dist_bottom_up`) — the names the interactive
    driver below calls.
    '''

    def __init__( self ):
        # The two words being compared and the DP (memo) table.
        self.word1 = """"""
        self.word2 = """"""
        self.dp = []

    def __min_dist_top_down_dp( self , m , n ):
        """Memoised edit distance between word1[:m + 1] and word2[:n + 1]."""
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            # was: compared word1 against itself (both words were stored under
            # one name), and results were bound to throwaway locals instead of
            # being written into the memo table
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
            else:
                insert = self.__min_dist_top_down_dp(m , n - 1 )
                delete = self.__min_dist_top_down_dp(m - 1 , n )
                replace = self.__min_dist_top_down_dp(m - 1 , n - 1 )
                self.dp[m][n] = 1 + min(insert , delete , replace )
            return self.dp[m][n]

    def min_dist_top_down( self , worda , wordb ):
        """Public entry point for the memoised recursive solver."""
        self.word1 = worda
        self.word2 = wordb
        self.dp = [[-1 for _ in range(len(wordb ) )] for _ in range(len(worda ) )]
        return self.__min_dist_top_down_dp(len(worda ) - 1 , len(wordb ) - 1 )

    def min_dist_bottom_up( self , worda , wordb ):
        """Classic O(m*n) tabulation of the edit distance."""
        self.word1 = worda
        self.word2 = wordb
        m = len(worda )
        n = len(wordb )
        self.dp = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
        for i in range(m + 1 ):
            for j in range(n + 1 ):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert , delete , replace )
        return self.dp[m][n]
if __name__ == "__main__":
    # Interactive demo: read two words and print the edit distance computed
    # by both the top-down and bottom-up solvers.
    # NOTE(review): `EditDistance`, `solver` and `Sa` are not defined under
    # these names above — likely a renaming artifact; confirm upstream.
    A_ : Union[str, Any] = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    A_ : int = input('Enter the first string: ').strip()
    A_ : List[str] = input('Enter the second string: ').strip()
    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | 0 |
import numpy as np
class _a :
    '''A single grid cell for the A* search below: carries its coordinates,
    the path/heuristic/total costs (g/h/f) and a parent back-pointer used to
    reconstruct the final path.
    '''

    def __init__( self ):
        # was: all of these were bound to throwaway locals, so the instance
        # attributes the search relies on were never created
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__( self , cell ):
        # Two cells are "equal" when they refer to the same grid coordinate.
        # was: parameter was `A__` while the body referenced undefined `cell`
        return self.position == cell.position

    def __A ( self ):
        # Debug helper: print this cell's coordinate.
        print(self.position )
class _a :
    '''A rectangular grid world; cells are addressed by (x, y) coordinates.'''

    def __init__( self , world_size=(5, 5) ):
        # was: attributes were bound to throwaway locals and the grid was
        # built from the class object (`np.zeros(_a)`) instead of world_size
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show( self ):
        """Print the raw occupancy grid."""
        print(self.w )

    def get_neigbours( self , cell ):
        """Return the in-bounds neighbours of `cell` (8-connectivity), each
        linked back to `cell` as its parent. This is the name the A* search
        below calls."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                # `Cell` is the grid-cell class defined earlier in this file.
                # was: the new cell's fields were bound to throwaway locals
                # and the CLASS object was appended instead of the cell
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours
def UpperCamelCase (world , start , goal ):
    """Best-first (A*-style) search from `start` to `goal` on `world`.

    Args:
        world: object exposing ``get_neigbours(cell)``.
        start, goal: cell objects with ``position``, ``parent`` and g/h/f costs.

    Returns:
        The list of (x, y) positions from start to goal, reconstructed through
        the cells' parent links.
    """
    # was: the three parameters were all named `lowercase_` (a SyntaxError)
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        # Expand the open cell with the smallest total cost f = g + h.
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            # NOTE(review): this closed-list scan is a no-op (`continue` only
            # skips within the inner loop) — preserved from the original.
            for c in _closed:
                if c == n:
                    continue
            # was: g/h/f were computed into throwaway locals (and the squared
            # distance used the same coordinate twice), so the cells' costs
            # were never updated and argmin above only ever saw zeros
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    # Demo: search a 5x5 grid from (0, 0) to (4, 4) and print the marked grid.
    # NOTE(review): `Gridworld`, `Cell`, `astar`, `world`, `start`, `goal`
    # and `s` do not match the obfuscated names defined above — likely a
    # renaming artifact; confirm upstream.
    A_ : Optional[Any] = Gridworld()
    # Start position and goal
    A_ : str = Cell()
    A_ : Union[str, Any] = (0, 0)
    A_ : Dict = Cell()
    A_ : str = (4, 4)
    print(f'''path from {start.position} to {goal.position}''')
    A_ : List[str] = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        A_ : str = 1
    print(world.w)
| 712 |
def UpperCamelCase (first , second ):
    """Add two non-negative integers using only bitwise operations.

    Classic carry-propagation trick: XOR adds without carry, AND finds the
    carry bits, which are shifted left and re-added until no carry remains.
    """
    # was: both parameters were named `lowercase_` (a SyntaxError), the carry
    # went to a throwaway local, and `second` was never updated (`c` was
    # undefined) — an infinite loop
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive demo: read two integers and print their bitwise sum.
    # NOTE(review): `add`, `first` and `second` are not defined under these
    # names above — likely a renaming artifact; confirm upstream.
    A_ : Optional[Any] = int(input('Enter the first number: ').strip())
    A_ : List[str] = int(input('Enter the second number: ').strip())
    print(f'''{add(first, second) = }''')
| 64 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a (_snake_case ):
    '''Composite processor bundling a CLIP image processor with a CLIP
    tokenizer behind a single callable interface.'''
    # NOTE(review): the three class attributes below share one name
    # (`UpperCAmelCase__`, later assignments shadow earlier ones) and several
    # signatures declare duplicated `A__` parameters (a SyntaxError) while the
    # bodies reference descriptive names — renaming artifact; confirm against
    # the upstream CLIPProcessor implementation.
    UpperCAmelCase__: Tuple = ['image_processor', 'tokenizer']
    UpperCAmelCase__: Tuple = 'CLIPImageProcessor'
    UpperCAmelCase__: Optional[int] = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , A__=None , A__=None , **A__ ):
        A__ : Tuple = None
        if "feature_extractor" in kwargs:
            # `feature_extractor` is the deprecated alias of `image_processor`.
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , A__ , )
            A__ : List[Any] = kwargs.pop("""feature_extractor""" )
        A__ : Any = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(A__ , A__ )
    def __call__( self , A__=None , A__=None , A__=None , **A__ ):
        # Tokenize text and/or preprocess images; at least one must be given.
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            A__ : int = self.tokenizer(A__ , return_tensors=A__ , **A__ )
        if images is not None:
            A__ : Any = self.image_processor(A__ , return_tensors=A__ , **A__ )
        if text is not None and images is not None:
            # NOTE(review): upstream attaches `image_features.pixel_values`
            # onto the text encoding before returning — here the value lands
            # in a throwaway local; confirm against upstream.
            A__ : int = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**A__ ) , tensor_type=A__ )
    def __A ( self , *A__ , **A__ ):
        # Delegates to the tokenizer (presumably upstream `batch_decode`).
        return self.tokenizer.batch_decode(*A__ , **A__ )
    def __A ( self , *A__ , **A__ ):
        # Delegates to the tokenizer (presumably upstream `decode`).
        return self.tokenizer.decode(*A__ , **A__ )
    @property
    def __A ( self ):
        # Union of both sub-processors' input names, deduplicated in order.
        A__ : str = self.tokenizer.model_input_names
        A__ : Dict = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def __A ( self ):
        # Deprecated accessor kept for backward compatibility.
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , A__ , )
        return self.image_processor_class
    @property
    def __A ( self ):
        # Deprecated accessor kept for backward compatibility.
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , A__ , )
        return self.image_processor
| 713 |
from __future__ import annotations
from collections.abc import Callable
# Type alias for a numeric matrix (upstream name: Matrix), used by the
# Gaussian-elimination solver and interpolation helpers below.
A_ : List[Any] = list[list[float | int]]
def UpperCamelCase (matrix , vector ):
    """Solve the linear system ``matrix @ x = vector`` by Gaussian elimination
    with partial pivoting.

    Args:
        matrix: square coefficient matrix as a list of rows.
        vector: column vector as a list of 1-element rows.

    Returns:
        The solution as a column vector (list of 1-element rows), with each
        entry rounded to 10 decimal places.
    """
    # was: both parameters shared one name (a SyntaxError)
    size = len(matrix )
    # Build the augmented matrix [matrix | vector].
    augmented = [[0 for _ in range(size + 1 )] for _ in range(size )]
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # Partial pivoting: pick the row with the largest entry in this column.
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            # was: both sides of the swap were bound to the same throwaway
            # name, so the rows were never actually exchanged
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # Back substitution: clear the entries above each pivot.
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def UpperCamelCase (lowercase_: list[int] ) -> Callable[[int], int]:
    # Build the unique polynomial through the points (1, y_1), ..., (k, y_k)
    # via a Vandermonde system, and return it as a callable.
    # NOTE(review): the throwaway `A__` bindings and the call to `solve`
    # (not defined under that name in this file) are renaming artifacts of
    # the upstream Project Euler 101 solution — confirm before relying on it.
    A__ : int = len(lowercase_ )
    A__ : Matrix = [[0 for _ in range(lowercase_ )] for _ in range(lowercase_ )]
    A__ : Matrix = [[0] for _ in range(lowercase_ )]
    A__ : Matrix
    A__ : int
    A__ : int
    A__ : int
    for x_val, y_val in enumerate(lowercase_ ):
        for col in range(lowercase_ ):
            A__ : Dict = (x_val + 1) ** (size - col - 1)
            A__ : Any = y_val
    A__ : Union[str, Any] = solve(lowercase_ , lowercase_ )
    def interpolated_func(lowercase_: int ) -> int:
        # Evaluate the fitted polynomial at `var` using the solved coefficients.
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(lowercase_ ) )
    return interpolated_func
def UpperCamelCase (variable: int ) -> int:
    """Evaluate Euler's example generating polynomial
    u(n) = 1 - n + n^2 - n^3 + ... + n^10 (Project Euler problem 101).
    """
    # was: the parameter was named `lowercase_` while the body referenced
    # the undefined name `variable`
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def UpperCamelCase (lowercase_: Callable[[int], int] = question_function , lowercase_: int = 10 ) -> int:
    # Sum the first incorrect terms (FITs) of the bad optimum polynomials
    # fitted to prefixes of the sequence (Project Euler 101).
    # NOTE(review): duplicated parameter names (`lowercase_`) are a
    # SyntaxError, and the body references `func`/`order`/`interpolate`
    # instead — renaming artifacts of the upstream solution; confirm.
    A__ : list[int] = [func(lowercase_ ) for x_val in range(1 , order + 1 )]
    A__ : list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    A__ : int = 0
    A__ : Callable[[int], int]
    A__ : int
    for poly in polynomials:
        # Walk forward until the fitted polynomial first disagrees with the
        # true sequence; that value is the FIT for this polynomial.
        A__ : List[str] = 1
        while func(lowercase_ ) == poly(lowercase_ ):
            x_val += 1
        ret += poly(lowercase_ )
    return ret
if __name__ == "__main__":
    # Script entry point; `solution` is the upstream name of the function above.
    print(f'''{solution() = }''')
| 64 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Dict = logging.get_logger(__name__)
# Canonical Funnel Transformer checkpoints mapped to their hosted config files.
A_ : Any = {
    'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
    'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
    'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
    'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
    'funnel-transformer/intermediate': (
        'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
    ),
    'funnel-transformer/intermediate-base': (
        'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
    ),
    'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
    'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
    'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
    'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class _a (__magic_name__ ):
    '''Configuration class for Funnel Transformer models: block layout,
    attention/pooling variants, dropouts and initializer settings.'''
    UpperCAmelCase__: Union[str, Any] = '''funnel'''
    UpperCAmelCase__: Any = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
    }
    # NOTE(review): `__init__` declares every parameter as the duplicated
    # name `A__` (a SyntaxError) while the body references descriptive names
    # and `_UpperCAmelCase`; the `@num_hidden_layers.setter` /
    # `@num_blocks.setter` decorators also reference property names that are
    # never defined here (all methods are `__A`) — renaming artifacts;
    # confirm against the upstream FunnelConfig.
    def __init__( self , A__=3_0522 , A__=[4, 4, 4] , A__=None , A__=2 , A__=768 , A__=12 , A__=64 , A__=3072 , A__="gelu_new" , A__=0.1 , A__=0.1 , A__=0.0 , A__=0.1 , A__=None , A__=1e-9 , A__="mean" , A__="relative_shift" , A__=True , A__=True , A__=True , **A__ , ):
        A__ : Union[str, Any] = vocab_size
        A__ : Tuple = block_sizes
        A__ : Tuple = [1] * len(_UpperCAmelCase ) if block_repeats is None else block_repeats
        assert len(_UpperCAmelCase ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        A__ : List[Any] = num_decoder_layers
        A__ : Optional[int] = d_model
        A__ : int = n_head
        A__ : Tuple = d_head
        A__ : List[Any] = d_inner
        A__ : Dict = hidden_act
        A__ : Union[str, Any] = hidden_dropout
        A__ : Optional[int] = attention_dropout
        A__ : int = activation_dropout
        A__ : Optional[int] = initializer_range
        A__ : Tuple = initializer_std
        A__ : Optional[int] = layer_norm_eps
        # Only mean/max pooling are implemented for the funnel blocks.
        assert pooling_type in [
            "mean",
            "max",
        ], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        A__ : Tuple = pooling_type
        # Two positional-attention parameterizations are supported.
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        A__ : List[str] = attention_type
        A__ : str = separate_cls
        A__ : int = truncate_seq
        A__ : int = pool_q_only
        super().__init__(**_UpperCAmelCase )
    @property
    def __A ( self ):
        # Total encoder layers = sum of the per-block layer counts.
        return sum(self.block_sizes )
    @num_hidden_layers.setter
    def __A ( self , A__ ):
        raise NotImplementedError(
            """This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
    @property
    def __A ( self ):
        # Number of funnel blocks.
        return len(self.block_sizes )
    @num_blocks.setter
    def __A ( self , A__ ):
        raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
| 714 |
from functools import lru_cache
@lru_cache
def UpperCamelCase (num: int ) -> int:
    """Return ``num`` factorial, memoised across calls via lru_cache.

    Raises:
        ValueError: if ``num`` is negative.
    """
    if num < 0:
        raise ValueError("""Number should not be negative.""" )
    # Iterative product; 0! and 1! both fall through as the empty product 1.
    product = 1
    for factor in range(2 , num + 1 ):
        product *= factor
    return product
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 64 | 0 |
from scipy.stats import pearsonr
import datasets
A_ : Any = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
A_ : List[str] = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
A_ : Union[str, Any] = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a (datasets.Metric ):
    '''Pearson correlation metric backed by scipy.stats.pearsonr.'''
    def __A ( self ):
        # Metric metadata: float predictions/references (presumably the
        # upstream `_info` method).
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""float""" ),
                    """references""": datasets.Value("""float""" ),
                } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def __A ( self , A__ , A__ , A__=False ):
        # NOTE(review): parameters are declared as duplicated `A__` (a
        # SyntaxError) while the body references `_UpperCamelCase`,
        # `return_pvalue` and `results` — renaming artifacts; confirm against
        # the upstream pearsonr metric's `_compute`.
        if return_pvalue:
            A__ : Union[str, Any] = pearsonr(_UpperCamelCase , _UpperCamelCase )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(_UpperCamelCase , _UpperCamelCase )[0] )}
| 715 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a (datasets.BeamBasedBuilder ):
    '''Minimal Beam builder producing flat {"content": str} examples.'''
    def __A ( self ):
        # Dataset metadata (presumably the upstream `_info` method).
        # NOTE(review): `supervised_keys=A__` passes an undefined name here.
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=A__ , )
    def __A ( self , A__ , A__ ):
        # Single train split fed by the toy example generator below.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
    def __A ( self , A__ , A__ ):
        # Build the example PCollection (NOTE(review): body references
        # `pipeline`, which the `A__` parameters never define).
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
class _a (datasets.BeamBasedBuilder ):
    '''Minimal Beam builder producing nested {"a": {"b": [str]}} examples.'''
    def __A ( self ):
        # Dataset metadata with a nested Sequence feature.
        # NOTE(review): `supervised_keys=A__` passes an undefined name here.
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=A__ , )
    def __A ( self , A__ , A__ ):
        # Single train split fed by the nested toy example generator below.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
        ]
    def __A ( self , A__ , A__ ):
        # Build the example PCollection (NOTE(review): body references
        # `pipeline`, which the `A__` parameters never define).
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(A__ )
def UpperCamelCase () -> Dict:
    """Return the toy (key, example) pairs consumed by the flat Beam builder."""
    examples = []
    for index, text in enumerate(("""foo""", """bar""", """foobar""") ):
        examples.append((index, {"content": text}) )
    return examples
def UpperCamelCase () -> Tuple:
    """Return the toy nested (key, example) pairs consumed by the nested Beam builder."""
    examples = []
    for index, text in enumerate(("""foo""", """bar""", """foobar""") ):
        examples.append((index, {"a": {"b": [text]}}) )
    return examples
class _a (__magic_name__ ):
    '''End-to-end tests for the Beam-based dataset builders above, run with
    the in-process DirectRunner.'''
    # NOTE(review): bodies reference `builder`, `dset`, `tmp_cache_dir` and
    # `expected_num_examples`, which the throwaway `A__` bindings never
    # define — renaming artifacts; confirm against the upstream test suite.
    @require_beam
    def __A ( self ):
        # Prepare the flat dummy dataset and verify rows, features and files.
        A__ : Dict = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
            A__ : int = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
    @require_beam
    def __A ( self ):
        # Same as above but forcing 2 output shards via a patched writer.
        import apache_beam as beam
        A__ : int = beam.io.parquetio.WriteToParquet
        A__ : List[str] = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : str = DummyBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
                A__ : Optional[Any] = partial(A__ , num_shards=2 )
                builder.download_and_prepare()
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
                self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
                A__ : Optional[int] = builder.as_dataset()
                self.assertEqual(dset["""train"""].num_rows , A__ )
                self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
                # Order is not preserved when sharding, so we just check that all the elements are there
                self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
                self.assertTrue(
                    os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
                del dset
    @require_beam
    def __A ( self ):
        # Without a beam_runner the builder must refuse to prepare.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : int = DummyBeamDataset(cache_dir=A__ )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
    @require_beam
    def __A ( self ):
        # Prepare the nested dataset and verify rows, features and files.
        A__ : List[Any] = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            A__ : Optional[int] = NestedBeamDataset(cache_dir=A__ , beam_runner="""DirectRunner""" )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(A__ , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
            A__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset["""train"""].num_rows , A__ )
            self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , A__ )
            self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(A__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
            del dset
| 64 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
A_ : Dict = logging.get_logger(__name__)

# Map from checkpoint shortcut name to its hosted config.json URL.
# NOTE(review): the repeated `A_` bindings throughout this file are obfuscation
# artifacts — each chunk's constants originally had distinct names.
A_ : str = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _a (PretrainedConfig):
    """Configuration for a SEW-D (Squeezed and Efficient Wav2Vec with
    Disentangled attention) model; defaults match `asapp/sew-d-tiny-100k`.

    NOTE(review): the obfuscated original inherited from the undefined name
    `a__`; `PretrainedConfig` (imported above) is the only config base in scope.
    """

    # Model identifier used by the auto classes (originally `model_type`).
    UpperCAmelCase__ = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        # The obfuscated signature reused `A__` for every parameter (a SyntaxError);
        # names were restored from the attribute assignments below.
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv specs describe the same stack of layers, so their
        # lengths must agree.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def __A(self):
        """Overall stride of the feature encoder (product of all conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 716 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
A_ : Union[str, Any] = logging.get_logger(__name__)
class _a (PoolFormerImageProcessor):
    """Deprecated alias for `PoolFormerImageProcessor`.

    Kept for backward compatibility; emits a FutureWarning on construction.
    NOTE(review): the obfuscated original inherited from the undefined
    `__magic_name__` and reused `A__` for both *args and **kwargs (a SyntaxError),
    passing the varargs where the warning category belongs.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import datasets
A_ : Any = '\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n'
A_ : Optional[Any] = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
A_ : Optional[Any] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def UpperCamelCase (preds, labels) -> float:
    """Simple accuracy: fraction of positions where `preds` equals `labels`.

    Intended for NumPy arrays (elementwise `==`, then `.mean()`); the original
    `-> int` annotation was wrong, since the mean of a boolean mask is a float.
    """
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _a (datasets.Metric):
    """XNLI metric: plain accuracy over predicted vs. reference labels."""

    def __A(self):
        # Metric metadata; "sts-b" is the only regression config, hence float32 there.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def __A(self, predictions, references):
        # NOTE(review): the original reused one parameter name twice (a SyntaxError)
        # and forwarded the undefined `__lowerCamelCase`; parameter names restored
        # from the feature spec above. `simple_accuracy` is expected to be the
        # module-level helper defined just before this class.
        return {"accuracy": simple_accuracy(predictions, references)}
| 717 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase (a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Build (train, valid) DataLoaders over the synthetic task y = a*x + b + noise.

    NOTE(review): parameter names restored from the garbled body (`a * x + b`,
    `batch_size * n_batches`); the shuffle flags (train=True, valid=False) are
    the conventional choice — confirm against the original test harness.
    """

    def get_dataset(n_batches):
        # x ~ N(0, 1); targets are the affine map plus small Gaussian noise.
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def UpperCamelCase (num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Run `num_epochs` of MSE training and return the random draws made per batch.

    The returned list of `random.random()` values lets callers verify that RNG
    state round-trips through save_state/load_state.
    NOTE(review): the obfuscated original reused one name for every parameter
    and annotated a tuple-unpack (`A__ , A__ : Any = batch`), both SyntaxErrors.
    """
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
    """Checkpointing tests for `accelerate.Accelerator.save_state`/`load_state`.

    NOTE(review): this file is machine-obfuscated — `A__` stands in for the
    original local names, and several annotated tuple assignments (e.g.
    `A__ , A__ : str = ...`) are not valid Python as written; the comments
    below describe the apparent intent of each test.
    """

    def __A ( self ):
        # With `total_limit=1`, saving twice must leave exactly one checkpoint
        # directory under the project dir.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : str = dummy_dataloaders()
            A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : List[str] = Accelerator(project_config=A__ )
            A__ , A__ , A__ , A__ : Any = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )

    def __A ( self ):
        # Round-trip with explicit checkpoint paths: after load_state, model
        # parameters, optimizer state and the RNG-driven training draws must
        # match the values captured at save time.
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : str = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : int = dummy_dataloaders()
            # Train baseline
            A__ : str = Accelerator()
            A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            A__ : List[Any] = os.path.join(A__ , """initial""" )
            accelerator.save_state(A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Dict = optimizer.state_dict()
            A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : str = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Optional[int] = DummyModel()
            A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Dict = dummy_dataloaders()
            A__ : List[str] = Accelerator()
            A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(A__ )
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : Union[str, Any] = optimizer.state_dict()
            # Restored state must equal the initially saved state.
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
            accelerator.save_state(A__ )
            # Load everything back in and make sure all states work
            accelerator.load_state(A__ )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
            A__ : Optional[int] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )

    def __A ( self ):
        # Same round-trip as above, but using automatic checkpoint naming
        # (checkpoints/checkpoint_<n> managed via ProjectConfiguration).
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : int = DummyModel()
            A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : List[str] = dummy_dataloaders()
            A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : str = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            ((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
            A__ : int = optimizer.state_dict()
            A__ : int = train(3 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
            A__ : Any = optimizer.state_dict()
            # Train partially
            set_seed(42 )
            A__ : Dict = DummyModel()
            A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ , A__ : Union[str, Any] = dummy_dataloaders()
            A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
            A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ )
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : Tuple = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            A__ : str = train(2 , A__ , A__ , A__ , A__ )
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
            test_rands += train(1 , A__ , A__ , A__ , A__ )
            ((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
            A__ : List[Any] = optimizer.state_dict()
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )
            self.assertEqual(A__ , A__ )

    def __A ( self ):
        # register_for_checkpointing must reject objects without state_dict
        # (plain tensors at indices 0 and 1) and only name those indices in
        # the error message.
        A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
        A__ : int = torch.tensor([2, 3, 4] )
        A__ : List[Any] = DummyModel()
        A__ : List[Any] = torch.optim.Adam(net.parameters() )
        A__ : Tuple = Accelerator()
        with self.assertRaises(A__ ) as ve:
            accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
        A__ : Any = str(ve.exception )
        self.assertTrue("""Item at index 0""" in message )
        self.assertTrue("""Item at index 1""" in message )
        self.assertFalse("""Item at index 2""" in message )
        self.assertFalse("""Item at index 3""" in message )

    def __A ( self ):
        # LR-scheduler state must be captured by save_state and restored by
        # load_state (it changes during training, then reverts on load).
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Any = DummyModel()
            A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
            A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
            A__ , A__ : List[Any] = dummy_dataloaders()
            A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
            # Train baseline
            A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
                A__ , A__ , A__ , A__ , A__ )
            # Save initial
            accelerator.save_state()
            A__ : Tuple = scheduler.state_dict()
            train(3 , A__ , A__ , A__ , A__ , A__ )
            self.assertNotEqual(A__ , scheduler.state_dict() )
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
            self.assertEqual(A__ , scheduler.state_dict() )

    def __A ( self ):
        # With total_limit=2, saving 11 times must prune down to the two
        # newest checkpoints (checkpoint_9 and checkpoint_10).
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42 )
            A__ : Optional[Any] = DummyModel()
            A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
            # Train baseline
            A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
            A__ : Union[str, Any] = accelerator.prepare(A__ )
            # Save 3 states:
            for _ in range(11 ):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
            self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )

    @require_cuda
    def __A ( self ):
        # Re-runs this module under torchrun (one process per GPU) to exercise
        # the distributed map_location checks in the __main__ block below.
        A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
    # Distributed smoke test, launched via torchrun from the @require_cuda test
    # above: verifies optimizer states can be reloaded onto the CPU, back onto
    # the accelerator device, and that an invalid map_location raises.
    # NOTE(review): obfuscated — the `A_` bindings should be the names read
    # below (savedir, model, optimizer, scheduler, param_device, ...).
    A_ : List[str] = '/tmp/accelerate/state_checkpointing'
    A_ : Optional[Any] = DummyModel()
    A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    A_ , A_ : List[Any] = dummy_dataloaders()
    A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    # Only the main process prepares a clean checkpoint directory.
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    A_ , A_ : Dict = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the intial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    A_ : Optional[Any] = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        A_ : str = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        A_ : Tuple = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 64 | 0 |
import argparse
import os
import re
# Root of the auto-mapping modules whose OrderedDict entries get sorted.
# (The broken eager `List[str]` annotation — `List` is not imported — was dropped.)
A_ = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches the quoted identifier beginning each mapping entry.
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def UpperCamelCase (fname: str, overwrite: bool = False):
    """Sort the OrderedDict entries of every auto mapping in `fname` alphabetically.

    With `overwrite=True` the file is rewritten in place; otherwise returns True
    when the file would change (and None when it is already sorted).
    NOTE(review): the obfuscated original reused one parameter name (a SyntaxError)
    and mixed `a_` placeholders into the body; the constants above were restored
    to the names this function actually references.
    """
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # Entries sit 8 columns past the mapping's own indentation.
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def UpperCamelCase (lowercase_: bool = False ) -> str:
    # Sorts every auto-mapping module in the auto directory; in check-only mode
    # (overwrite False) raises if any file still needs sorting.
    # NOTE(review): obfuscated — `a_`, `fnames`, `overwrite`, `sort_auto_mapping`
    # and the `diffs` pairing below do not resolve to names defined in this file
    # as written; the intent is the standard `make style` auto-mapping check.
    A__ : Union[str, Any] = [os.path.join(a_ , a_ ) for f in os.listdir(a_ ) if f.endswith(""".py""" )]
    A__ : List[Any] = [sort_auto_mapping(a_ , overwrite=a_ ) for fname in fnames]

    if not overwrite and any(a_ ):
        A__ : Tuple = [f for f, d in zip(a_ , a_ ) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {', '.join(a_ )}. Run `make style` to fix"""
            """ this.""" )


if __name__ == "__main__":
    # CLI entry point: --check_only reports instead of rewriting.
    A_ : List[Any] = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    A_ : Any = parser.parse_args()
    sort_all_auto_mappings(not args.check_only)
| 718 |
def UpperCamelCase (a: str, b: str) -> bool:
    """Return True if `a` can be turned into `b` by uppercasing some of its
    lowercase letters and deleting the remaining lowercase letters.

    Classic "Abbreviation" DP: dp[i][j] is True when the first i characters of
    `a` can produce the first j characters of `b`. O(len(a) * len(b)).
    NOTE(review): the obfuscated original reused one parameter name (a
    SyntaxError); names restored from the body's `a`/`b`/`n`/`m` references.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True  # empty prefix of `a` yields empty prefix of `b`
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Uppercase a[i] to match b[j] ...
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # ... or delete a[i] if it is lowercase.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 0 |
# Type aliases restored from the function annotations below (the obfuscated
# original bound both to `A_` with a bogus `int` annotation).
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]


def create_vector(end_point1: Pointad, end_point2: Pointad) -> Vectorad:
    """Vector from `end_point1` to `end_point2` (componentwise difference)."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    """3-D cross product ab x ac."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    """True when every component rounds to zero at `accuracy` decimal places."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def UpperCamelCase (aa: Pointad, ab: Pointad, ac: Pointad, accuracy: int = 10) -> bool:
    """Check whether three points are collinear: the cross product of the two
    edge vectors from `aa` must vanish (up to `accuracy` decimal places).

    Helper names (`create_vector`, `get_ad_vectors_cross`, `is_zero_vector`)
    were restored from the call sites the obfuscated original already used.
    """
    ab_vec = create_vector(aa, ab)
    ac_vec = create_vector(aa, ac)
    return is_zero_vector(get_ad_vectors_cross(ab_vec, ac_vec), accuracy)
| 719 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Dict = random.Random()
if is_torch_available():
import torch
def UpperCamelCase (shape, scale=1.0, rng=None, name=None):
    """Return a shape[0] x shape[1] nested list of floats in [0, scale).

    NOTE(review): parameter names restored from the garbled body; when `rng` is
    None the module-level global RNG (bound as `A_` above) is used — `name` is
    accepted for API compatibility and ignored, as in the original helper.
    """
    if rng is None:
        rng = A_  # presumably the module-global `random.Random()` — TODO confirm
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class _a (unittest.TestCase ):
'''simple docstring'''
def __init__( self , A__ , A__=7 , A__=400 , A__=2000 , A__=1 , A__=0.0 , A__=1_6000 , A__=True , A__=True , ):
A__ : Any = parent
A__ : Optional[int] = batch_size
A__ : Union[str, Any] = min_seq_length
A__ : Dict = max_seq_length
A__ : Tuple = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
A__ : str = feature_size
A__ : Optional[int] = padding_value
A__ : List[str] = sampling_rate
A__ : List[str] = return_attention_mask
A__ : int = do_normalize
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __A ( self , A__=False , A__=False ):
def _flatten(A__ ):
return list(itertools.chain(*A__ ) )
if equal_length:
A__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
A__ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
A__ : Optional[int] = [np.asarray(A__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a (__magic_name__ , unittest.TestCase ):
    """Tests for the AST feature extractor: batching equivalence, dtype
    preservation under padding, and an integration check against reference
    log-mel values.

    NOTE(review): obfuscated — `A__` locals stand in for the original names
    (feat_extract, speech_inputs, ...), and `__magic_name__` is presumably the
    SequenceFeatureExtractionTestMixin imported at the top of this chunk.
    """
    UpperCAmelCase__: int = ASTFeatureExtractor

    def __A ( self ):
        # Shared tester instance with the default hyper-parameters.
        A__ : Optional[Any] = ASTFeatureExtractionTester(self )

    def __A ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        A__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        A__ : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        A__ : Optional[Any] = [np.asarray(A__ ) for speech_input in speech_inputs]
        # Test not batched input
        A__ : Tuple = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
        self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test batched
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        A__ : Tuple = feat_extract(A__ , padding=A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        A__ : int = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        A__ : List[str] = np.asarray(A__ )
        A__ : Union[str, Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        A__ : Optional[Any] = feat_extract(A__ , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )

    @require_torch
    def __A ( self ):
        # Padding must preserve float32 for both numpy and torch return types.
        import torch

        A__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        A__ : Tuple = np.random.rand(100 ).astype(np.floataa )
        A__ : Tuple = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            A__ : List[str] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            A__ : Any = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def __A ( self , A__ ):
        # Loads `num_samples` decoded audio arrays from the dummy LibriSpeech set.
        from datasets import load_dataset

        A__ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
        # automatic decoding with librispeech
        A__ : str = ds.sort("""id""" ).select(range(A__ ) )[:num_samples]["""audio"""]
        return [x["array"] for x in speech_samples]

    @require_torch
    def __A ( self ):
        # Integration check: first 30 feature values of one LibriSpeech sample
        # against hard-coded reference numbers, plus the expected output shape.
        # fmt: off
        A__ : Optional[Any] = torch.tensor(
            [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
             -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
             -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
             -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
        # fmt: on
        A__ : Any = self._load_datasamples(1 )
        A__ : Tuple = ASTFeatureExtractor()
        A__ : Dict = feature_extractor(A__ , return_tensors="""pt""" ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , A__ , atol=1e-4 ) )
| 64 | 0 |
from collections.abc import Iterable
from typing import Any
class _a :
'''simple docstring'''
def __init__( self , A__ = None ):
A__ : Tuple = value
A__ : str = None # Added in order to delete a node easier
A__ : Optional[Any] = None
A__ : Dict = None
def __repr__( self ):
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 )
class _a :
'''simple docstring'''
def __init__( self , A__ = None ):
A__ : Union[str, Any] = root
def __str__( self ):
return str(self.root )
def __A ( self , A__ , A__ ):
if new_children is not None: # reset its kids
A__ : Union[str, Any] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(snake_case_ ): # If it is the right children
A__ : Union[str, Any] = new_children
else:
A__ : Dict = new_children
else:
A__ : List[Any] = new_children
def __A ( self , A__ ):
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __A ( self ):
return self.root is None
def __A ( self , A__ ):
A__ : Dict = Node(snake_case_ ) # create a new Node
if self.empty(): # if Tree is empty
A__ : Tuple = new_node # set its root
else: # Tree is not empty
A__ : str = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
A__ : List[str] = new_node # We insert the new node in a leaf
break
else:
A__ : Dict = parent_node.left
else:
if parent_node.right is None:
A__ : str = new_node
break
else:
A__ : str = parent_node.right
A__ : List[str] = parent_node
def __A ( self , *A__ ):
for value in values:
self.__insert(snake_case_ )
def __A ( self , A__ ):
if self.empty():
raise IndexError("""Warning: Tree is empty! please use another.""" )
else:
A__ : int = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
A__ : Optional[int] = node.left if value < node.value else node.right
return node
def __A ( self , A__ = None ):
if node is None:
if self.root is None:
return None
A__ : str = self.root
if not self.empty():
while node.right is not None:
A__ : Union[str, Any] = node.right
return node
def __A ( self , A__ = None ):
if node is None:
A__ : Tuple = self.root
if self.root is None:
return None
if not self.empty():
A__ : int = self.root
while node.left is not None:
A__ : Any = node.left
return node
def __A ( self , A__ ):
A__ : List[str] = self.search(snake_case_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(snake_case_ , snake_case_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(snake_case_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(snake_case_ , node.left )
else:
A__ : Optional[int] = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
A__ : List[str] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __A ( self , A__ ):
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __A ( self , A__=None ):
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __A ( self , A__ , A__ ):
if node:
self.inorder(snake_case_ , node.left )
arr.append(node.value )
self.inorder(snake_case_ , node.right )
def __A ( self , A__ , A__ ):
A__ : Optional[int] = []
self.inorder(snake_case_ , snake_case_ ) # append all values to list using inorder traversal
return arr[k - 1]
def UpperCamelCase (curr_node) -> "list[Node]":
    """Postorder traversal (left, right, root) of the subtree at `curr_node`.

    The recursive calls in the obfuscated original targeted `postorder`, which
    does not exist under this def name — they now recurse correctly; the return
    annotation is quoted so it no longer evaluates `Node` eagerly at def time.
    """
    node_list = []
    if curr_node is not None:
        node_list = UpperCamelCase(curr_node.left) + UpperCamelCase(curr_node.right) + [curr_node]
    return node_list
def UpperCamelCase () -> None:
    """Demo: build a BST from a fixed tuple, run search/min/max, then delete all keys."""
    # NOTE(review): obfuscated — `testlist`/`t` are read below but the two
    # assignments bind `A__` locals, and `BinarySearchTree`/`lowercase_` do not
    # resolve to names defined in this file as written.
    A__ : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    A__ : Dict = BinarySearchTree()
    for i in testlist:
        t.insert(lowercase_ )
    # Prints all the elements of the list in order traversal
    print(lowercase_ )

    if t.search(6 ) is not None:
        print("""The value 6 exists""" )
    else:
        print("""The value 6 doesn\'t exist""" )

    if t.search(-1 ) is not None:
        print("""The value -1 exists""" )
    else:
        print("""The value -1 doesn\'t exist""" )

    if not t.empty():
        print("""Max Value: """ , t.get_max().value )  # type: ignore
        print("""Min Value: """ , t.get_min().value )  # type: ignore

    for i in testlist:
        t.remove(lowercase_ )
        print(lowercase_ )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 720 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _a(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """
    Text decoder for UniDiffuser-style captioning.

    Maps a CLIP-like "prefix" embedding into GPT-2's token-embedding space
    (optionally through a hidden bottleneck) and decodes captions from it with
    beam search.
    """

    # Checkpoint keys matching these patterns are ignored on load
    # (GPT-2 attention bias buffers are recreated, not loaded).
    UpperCAmelCase__ = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        """
        Args:
            prefix_length: Number of prefix tokens prepended to the caption.
            prefix_inner_dim: Dimensionality of the incoming prefix embedding.
            prefix_hidden_dim: Optional bottleneck dimension; required when
                `prefix_inner_dim != n_embd` so the prefix can be projected
                into GPT-2's embedding space.
            (remaining args): Standard GPT-2 configuration values, forwarded
                verbatim to `GPTaConfig`.
        """
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            # Bug fix: the message previously interpolated `prefix_hidden_dim`
            # (always None on this path) instead of `prefix_inner_dim`.
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        # Project the prefix into (and back out of) the optional bottleneck.
        # With no bottleneck, both mappings are identity.
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPTaConfig(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPTaLMHeadModel(gpt_config)

    def forward(self, input_ids, prefix_embeds, attention_mask=None, labels=None):
        """
        Training/teacher-forcing pass: embed the caption tokens, project the
        prefix, concatenate [prefix | caption] along the sequence dimension,
        and run GPT-2. Returns `(output, hidden)` when a bottleneck is
        configured (so the hidden prefix can be supervised), else just the
        GPT-2 output.
        """
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            # Pad labels with zeros for the prefix positions so label length
            # matches the concatenated input sequence.
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size, device):
        """Zero label placeholders for the prefix positions, one row per sample."""
        # Bug fix: the dtype was the nonexistent `torch.intaa`; labels must be int64.
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        """Project a raw prefix embedding into the (optional) hidden space."""
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """
        Decode one caption per feature row. Features are split into single
        rows, mapped back to GPT-2 space via `decode_prefix`, and decoded one
        at a time with beam search; the top beam of each sample is kept.
        Returns `(tokens, seq_lengths)` stacked over the batch.
        """
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """
        Length-normalized beam search over GPT-2, starting from either token
        ids or precomputed input embeddings (embeddings take precedence).
        Returns `(tokens, seq_lengths)` for all `beam_size` hypotheses sorted
        best-first by average log-probability.
        """
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                # First step: seed the beams with the top-k first tokens.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Finished beams contribute only a zero-score continuation at
                # token 0, so they are carried along unchanged.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                # Recover (source beam, token) from the flattened top-k index.
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                # Undo the length normalization so `scores` stays a raw sum.
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(
                generated.shape[0], 1, -1
            )
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
| 64 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.