code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
def A__ ( snake_case_ : str ):
SCREAMING_SNAKE_CASE__: int= hex_num.strip()
if not hex_num:
raise ValueError('''No value was passed to the function''' )
SCREAMING_SNAKE_CASE__: Optional[Any]= hex_num[0] == '''-'''
if is_negative:
SCREAMING_SNAKE_CASE__: Optional[int]= hex_num[1:]
try:
SCREAMING_SNAKE_CASE__: List[str]= int(snake_case_ , 16 )
except ValueError:
raise ValueError('''Invalid value was passed to the function''' )
SCREAMING_SNAKE_CASE__: Optional[Any]= ''''''
while int_num > 0:
SCREAMING_SNAKE_CASE__: int= str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('''-''' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ : Tuple = 3
def A__ ( snake_case_ : int ):
print('''Generating primitive root of p''' )
while True:
SCREAMING_SNAKE_CASE__: List[Any]= random.randrange(3 , snake_case_ )
if pow(snake_case_ , 2 , snake_case_ ) == 1:
continue
if pow(snake_case_ , snake_case_ , snake_case_ ) == 1:
continue
return g
def A__ ( snake_case_ : int ):
print('''Generating prime p...''' )
SCREAMING_SNAKE_CASE__: List[Any]= rabin_miller.generate_large_prime(snake_case_ ) # select large prime number.
SCREAMING_SNAKE_CASE__: int= primitive_root(snake_case_ ) # one primitive root on modulo p.
SCREAMING_SNAKE_CASE__: int= random.randrange(3 , snake_case_ ) # private_key -> have to be greater than 2 for safety.
SCREAMING_SNAKE_CASE__: str= cryptomath.find_mod_inverse(pow(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ )
SCREAMING_SNAKE_CASE__: int= (key_size, e_a, e_a, p)
SCREAMING_SNAKE_CASE__: Union[str, Any]= (key_size, d)
return public_key, private_key
def A__ ( snake_case_ : str , snake_case_ : int ):
if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
print('''\nWARNING:''' )
print(
F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= generate_key(snake_case_ )
print(F'\nWriting public key to file {name}_pubkey.txt...' )
with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
print(F'Writing private key to file {name}_privkey.txt...' )
with open(F'{name}_privkey.txt' , '''w''' ) as fo:
fo.write(F'{private_key[0]},{private_key[1]}' )
def A__ ( ):
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 64 | 1 |
def A__ ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : str=False ):
if isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ ):
SCREAMING_SNAKE_CASE__: int= len(set_a.intersection(snake_case_ ) )
if alternative_union:
SCREAMING_SNAKE_CASE__: int= len(snake_case_ ) + len(snake_case_ )
else:
SCREAMING_SNAKE_CASE__: str= len(set_a.union(snake_case_ ) )
return intersection / union
if isinstance(snake_case_ , (list, tuple) ) and isinstance(snake_case_ , (list, tuple) ):
SCREAMING_SNAKE_CASE__: Any= [element for element in set_a if element in set_b]
if alternative_union:
SCREAMING_SNAKE_CASE__: List[str]= len(snake_case_ ) + len(snake_case_ )
return len(snake_case_ ) / union
else:
SCREAMING_SNAKE_CASE__: Optional[int]= set_a + [element for element in set_b if element not in set_a]
return len(snake_case_ ) / len(snake_case_ )
return len(snake_case_ ) / len(snake_case_ )
return None
if __name__ == "__main__":
lowercase_ : Any = {'a', 'b', 'c', 'd', 'e'}
lowercase_ : Optional[Any] = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
| 64 | from math import factorial
def A__ ( snake_case_ : int , snake_case_ : int ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(snake_case_ ) // (factorial(snake_case_ ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'''are {combinations(1_0, 3)} ways that first, second and''',
'third place can be awarded.',
)
| 64 | 1 |
from __future__ import annotations
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: int= TypeError(
'''Matrices must be formed from a list of zero or more lists containing at '''
'''least one and the same number of values, each of which must be of type '''
'''int or float.''' )
if len(lowerCAmelCase ) != 0:
SCREAMING_SNAKE_CASE__: List[str]= len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(lowerCAmelCase ) != cols:
raise error
for value in row:
if not isinstance(lowerCAmelCase , (int, float) ):
raise error
SCREAMING_SNAKE_CASE__: Optional[Any]= rows
else:
SCREAMING_SNAKE_CASE__: List[str]= []
def UpperCamelCase_ ( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def UpperCamelCase_ ( self ) -> int:
return len(self.rows )
@property
def UpperCamelCase_ ( self ) -> int:
return len(self.rows[0] )
@property
def UpperCamelCase_ ( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def UpperCamelCase_ ( self ) -> bool:
return self.order[0] == self.order[1]
def UpperCamelCase_ ( self ) -> Matrix:
SCREAMING_SNAKE_CASE__: Optional[int]= [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def UpperCamelCase_ ( self ) -> bool:
return bool(self.determinant() )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__: Tuple= [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowerCAmelCase ).determinant()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(lowerCAmelCase , lowerCAmelCase )
return -1 * self.get_minor(lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Matrix:
return Matrix(
[
[self.get_minor(lowerCAmelCase , lowerCAmelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def UpperCamelCase_ ( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def UpperCamelCase_ ( self ) -> Matrix:
SCREAMING_SNAKE_CASE__: int= [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Matrix:
SCREAMING_SNAKE_CASE__: int= self.determinant()
if not determinant:
raise TypeError('''Only matrices with a non-zero determinant have an inverse''' )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
'''[''' + '''. '''.join([str(lowerCAmelCase ) for value in row] ) + '''.]'''
for row in self.rows
] )
+ "]"
)
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> None:
SCREAMING_SNAKE_CASE__: Any= TypeError('''Row must be a list containing all ints and/or floats''' )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise type_error
for value in row:
if not isinstance(lowerCAmelCase , (int, float) ):
raise type_error
if len(lowerCAmelCase ) != self.num_columns:
raise ValueError(
'''Row must be equal in length to the other rows in the matrix''' )
if position is None:
self.rows.append(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.rows[0:position] + [row] + self.rows[position:]
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> None:
SCREAMING_SNAKE_CASE__: int= TypeError(
'''Column must be a list containing all ints and/or floats''' )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise type_error
for value in column:
if not isinstance(lowerCAmelCase , (int, float) ):
raise type_error
if len(lowerCAmelCase ) != self.num_rows:
raise ValueError(
'''Column must be equal in length to the other columns in the matrix''' )
if position is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
SCREAMING_SNAKE_CASE__: List[str]= [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , lowerCAmelCase ) -> bool:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , lowerCAmelCase ) -> bool:
return not self == other
def __neg__( self ) -> Matrix:
return self * -1
def __add__( self , lowerCAmelCase ) -> Matrix:
if self.order != other.order:
raise ValueError('''Addition requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , lowerCAmelCase ) -> Matrix:
if self.order != other.order:
raise ValueError('''Subtraction requires matrices of the same order''' )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , lowerCAmelCase ) -> Matrix:
if isinstance(lowerCAmelCase , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
if self.num_columns != other.num_rows:
raise ValueError(
'''The number of columns in the first matrix must '''
'''be equal to the number of rows in the second''' )
return Matrix(
[
[Matrix.dot_product(lowerCAmelCase , lowerCAmelCase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
'''A Matrix can only be multiplied by an int, float, or another matrix''' )
def __pow__( self , lowerCAmelCase ) -> Matrix:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise TypeError('''A Matrix can only be raised to the power of an int''' )
if not self.is_square:
raise ValueError('''Only square matrices can be raised to a power''' )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
'''Only invertable matrices can be raised to a negative power''' )
SCREAMING_SNAKE_CASE__: str= self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase ) -> int:
return sum(row[i] * column[i] for i in range(len(lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase_ : Dict = random.Random()
if is_torch_available():
import torch
def A__ ( snake_case_ : int , snake_case_ : Optional[Any]=1.0 , snake_case_ : Dict=None , snake_case_ : Dict=None ):
if rng is None:
SCREAMING_SNAKE_CASE__: Tuple= global_rng
SCREAMING_SNAKE_CASE__: List[str]= []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=1 , lowerCAmelCase=0.0 , lowerCAmelCase=16000 , lowerCAmelCase=True , lowerCAmelCase=True , ) -> List[str]:
SCREAMING_SNAKE_CASE__: Optional[Any]= parent
SCREAMING_SNAKE_CASE__: Dict= batch_size
SCREAMING_SNAKE_CASE__: Optional[int]= min_seq_length
SCREAMING_SNAKE_CASE__: Dict= max_seq_length
SCREAMING_SNAKE_CASE__: Optional[Any]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
SCREAMING_SNAKE_CASE__: Dict= feature_size
SCREAMING_SNAKE_CASE__: str= padding_value
SCREAMING_SNAKE_CASE__: Dict= sampling_rate
SCREAMING_SNAKE_CASE__: List[str]= return_attention_mask
SCREAMING_SNAKE_CASE__: str= do_normalize
def UpperCamelCase_ ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Dict:
def _flatten(lowerCAmelCase ):
return list(itertools.chain(*lowerCAmelCase ) )
if equal_length:
SCREAMING_SNAKE_CASE__: int= floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
SCREAMING_SNAKE_CASE__: int= [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = ASTFeatureExtractor
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: List[Any]= ASTFeatureExtractionTester(self )
def UpperCamelCase_ ( self ) -> Any:
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__: Optional[int]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__: Dict= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__: Tuple= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
SCREAMING_SNAKE_CASE__: List[Any]= np.asarray(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
@require_torch
def UpperCamelCase_ ( self ) -> Dict:
import torch
SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__: List[str]= np.random.rand(100 ).astype(np.floataa )
SCREAMING_SNAKE_CASE__: Optional[Any]= np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
from datasets import load_dataset
SCREAMING_SNAKE_CASE__: Optional[int]= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__: Dict= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def UpperCamelCase_ ( self ) -> str:
# fmt: off
SCREAMING_SNAKE_CASE__: str= torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__: Tuple= ASTFeatureExtractor()
SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
| 64 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _lowerCamelCase :
__a = PegasusConfig
__a = {}
__a = "gelu"
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=40 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=0 , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Dict= parent
SCREAMING_SNAKE_CASE__: str= batch_size
SCREAMING_SNAKE_CASE__: str= seq_length
SCREAMING_SNAKE_CASE__: int= is_training
SCREAMING_SNAKE_CASE__: Tuple= use_labels
SCREAMING_SNAKE_CASE__: str= vocab_size
SCREAMING_SNAKE_CASE__: Any= hidden_size
SCREAMING_SNAKE_CASE__: List[str]= num_hidden_layers
SCREAMING_SNAKE_CASE__: Tuple= num_attention_heads
SCREAMING_SNAKE_CASE__: Tuple= intermediate_size
SCREAMING_SNAKE_CASE__: List[Any]= hidden_dropout_prob
SCREAMING_SNAKE_CASE__: Optional[int]= attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__: Optional[int]= max_position_embeddings
SCREAMING_SNAKE_CASE__: int= eos_token_id
SCREAMING_SNAKE_CASE__: Any= pad_token_id
SCREAMING_SNAKE_CASE__: Union[str, Any]= bos_token_id
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: str= ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE__: Tuple= tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__: Dict= tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__: str= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE__: Tuple= prepare_pegasus_inputs_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return config, inputs_dict
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Any= TFPegasusModel(config=lowerCAmelCase ).get_decoder()
SCREAMING_SNAKE_CASE__: Dict= inputs_dict['''input_ids''']
SCREAMING_SNAKE_CASE__: List[Any]= input_ids[:1, :]
SCREAMING_SNAKE_CASE__: List[str]= inputs_dict['''attention_mask'''][:1, :]
SCREAMING_SNAKE_CASE__: str= inputs_dict['''head_mask''']
SCREAMING_SNAKE_CASE__: List[str]= 1
# first forward pass
SCREAMING_SNAKE_CASE__: List[Any]= model(lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__: Union[str, Any]= ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__: List[str]= tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
SCREAMING_SNAKE_CASE__: List[Any]= tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE__: Dict= tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE__: List[Any]= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE__: Optional[Any]= model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE__: List[Any]= int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE__: Union[str, Any]= output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE__: Tuple= output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase , lowerCAmelCase , rtol=1e-3 )
def A__ ( snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : int=None , snake_case_ : Tuple=None , snake_case_ : Optional[int]=None , ):
if attention_mask is None:
SCREAMING_SNAKE_CASE__: str= tf.cast(tf.math.not_equal(snake_case_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__: Any= tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE__: int= tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__: str= tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__: List[str]= tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__a = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__a = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__a = True
__a = False
__a = False
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Tuple= TFPegasusModelTester(self )
SCREAMING_SNAKE_CASE__: Optional[Any]= ConfigTester(self , config_class=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Any= self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
__a = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__a = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__a = "google/pegasus-xsum"
@cached_property
def UpperCamelCase_ ( self ) -> Dict:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: List[Any]= TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCamelCase_ ( self , **lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: str= self.translate_src_text(**lowerCAmelCase )
assert self.expected_text == generated_words
def UpperCamelCase_ ( self , **lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE__: Tuple= self.tokenizer(self.src_text , **lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''tf''' )
SCREAMING_SNAKE_CASE__: Tuple= self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: Optional[Any]= self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase )
return generated_words
@slow
def UpperCamelCase_ ( self ) -> str:
self._assert_generated_batch_equal_expected()
| 64 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
lowercase_ : List[Any] = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowercase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowercase_ : int = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
    """Deprecated feature-extractor shim kept for backward compatibility.

    Forwards everything to the image-processor base class while emitting a
    deprecation warning so callers migrate before removal in v5.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Fixed: the parameter list used the same name for *args and **kwargs
        # (a SyntaxError), and the warning category was an undefined name —
        # transformers deprecation shims raise FutureWarning here.
        warnings.warn(
            '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use LayoutLMv2ImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A__():
    """Parse the demo's command-line arguments.

    Returns:
        argparse.Namespace with pretrained_model_name_or_path, caption,
        images_num, seed and cuda_id.
    """
    # Fixed: type/default/required previously referenced the undefined name
    # ``snake_case_`` (NameError at call time); values reconstructed from the
    # flags and help texts.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''',
        '''--pretrained_model_name_or_path''',
        type=str,
        default=None,
        required=True,
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
    )
    parser.add_argument(
        '''-c''',
        '''--caption''',
        type=str,
        default='''robotic cat with wings''',
        help='''Text used to generate images.''',
    )
    parser.add_argument(
        '''-n''',
        '''--images_num''',
        type=int,
        default=4,
        help='''How much images to generate.''',
    )
    parser.add_argument(
        '''-s''',
        '''--seed''',
        type=int,
        default=42,
        help='''Seed for random process.''',
    )
    parser.add_argument(
        '''-ci''',
        '''--cuda_id''',
        type=int,
        default=0,
        help='''cuda_id.''',
    )
    args = parser.parse_args()
    return args
def A__(imgs, rows, cols):
    """Paste ``rows * cols`` PIL images into one contact-sheet grid image.

    Raises:
        ValueError: if len(imgs) != rows * cols.
    """
    if len(imgs) != rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''')
    # Fixed: both dimensions were unpacked into a single obfuscated name and
    # the code then referenced undefined ``w``/``h`` (NameError).
    w, h = imgs[0].size
    grid = Image.new('''RGB''', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        # row-major placement: column = i % cols, row = i // cols
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def A__(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    """Generate images with ``pipeline`` and arrange them on a square-ish grid.

    Fixed: every parameter shared the name ``snake_case_`` (a SyntaxError);
    names restored from the keyword arguments used at the call site
    (prompt=, num_images_per_prompt=, seed=).

    Returns:
        (grid, images): the composited grid image and the list of images.
    """
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    # NOTE(review): relies on an ``image_grid`` helper; in this file that
    # helper is defined under the placeholder name ``A__`` — confirm.
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
# ---- Script entry: quantized Stable Diffusion image-generation demo ----
# NOTE(review): obfuscation artifact — every result is assigned to the same
# name ``lowercase_``, so the later references (args, tokenizer, text_encoder,
# vae, unet, pipeline, grid, images, dirname) are unbound; confirm against the
# original script before running.
lowercase_ : List[str] = parse_args()  # NOTE(review): ``parse_args`` is defined above as ``A__`` — confirm
# Load models and create wrapper for stable diffusion
lowercase_ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowercase_ : List[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowercase_ : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowercase_ : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowercase_ : Dict = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker: always report "not NSFW".
lowercase_ : str = lambda images, clip_input: (images, False)
# Prefer an INT8 UNet checkpoint (neural-compressor ``load``) when present.
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    lowercase_ : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    # Full-precision fallback: run the UNet on the requested CUDA device.
    lowercase_ : Any = unet.to(torch.device('cuda', args.cuda_id))
lowercase_ : str = pipeline.to(unet.device)
# NOTE(review): an annotated assignment cannot have a tuple target — this line
# is a SyntaxError; the original was ``grid, images = generate_images(...)``.
lowercase_ , lowercase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the contact-sheet grid, then each image individually.
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowercase_ : List[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 64 | 1 |
def A__(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v**2 of a body.

    Fixed: both parameters were named ``snake_case_`` (a SyntaxError) while
    the body referenced ``mass``; names restored from the body/error message.

    Raises:
        ValueError: if mass is negative.
    """
    if mass < 0:
        raise ValueError('''The mass of a body cannot be negative''')
    # abs() makes the result direction-independent (v**2 via |v|*|v|).
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 64 | from __future__ import annotations
from collections import deque
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: list[dict]= []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCAmelCase )
self.set_fail_transitions()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: str= 0
for character in keyword:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= next_state
self.adlist[current_state]["output"].append(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> None:
SCREAMING_SNAKE_CASE__: deque= deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= 0
while q:
SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
lowerCAmelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
SCREAMING_SNAKE_CASE__: str= (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
for i in range(len(lowerCAmelCase ) ):
while (
self.find_next_state(lowerCAmelCase , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__: List[Any]= 0
else:
SCREAMING_SNAKE_CASE__: Dict= next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__: Optional[Any]= []
result[key].append(i - len(lowerCAmelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Any = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _lowerCamelCase(Pipeline):
    """Image-to-text pipeline: predicts a caption for a given image, with
    optional prompt-conditioned generation for git / pix2struct models.

    Fixes to the obfuscated original: every method had duplicate parameter
    names (SyntaxErrors), the decorator argument and base class were the
    undefined placeholder ``UpperCamelCase_`` (the file imports
    PIPELINE_INIT_ARGS and Pipeline for exactly these roles), and the
    methods were all named ``UpperCamelCase_``, breaking the Pipeline
    preprocess/_forward/postprocess dispatch contract.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, '''vision''')
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        # Split caller kwargs into per-stage dicts (preprocess / forward /
        # postprocess); max_new_tokens is folded into generate_kwargs.
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''')
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f'Received an invalid text input, got - {type(prompt)} - but expected a single string. '
                    '''Note also that one single text can be provided for conditional image to text generation.''')
            model_type = self.model.config.model_type
            if model_type == "git":
                # git conditions on token ids prepended with the CLS token
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({'''input_ids''': input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f'Model type {model_type} does not support conditional text generation')
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        # One {"generated_text": ...} record per generated sequence.
        records = []
        for output_ids in model_outputs:
            record = {
                '''generated_text''': self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
import numpy as np
def A__(f, ya, xa, h, x_end):
    """Classic 4th-order Runge-Kutta integrator for y' = f(x, y).

    Fixed: all five parameters shared the name ``snake_case_`` (a SyntaxError)
    and the four slope samples were collapsed into one name; restored from the
    body's references (``x_end``, ``xa``, ``ya``, ``h``).

    Args:
        f: right-hand side, called as f(x, y).
        ya: initial value y(xa).
        xa: left end of the integration interval.
        h: step size.
        x_end: right end of the integration interval.

    Returns:
        numpy array of n + 1 successive y values, n = ceil((x_end - xa) / h).
    """
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # four slope samples per step (start, two midpoints, end)
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 1 |
def A__(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below ``n`` (Project Euler 10).

    Sieve of Eratosthenes over ``primality_list`` where 0 = presumed prime
    and 1 = composite. Fixes to the obfuscated original: the inner sieve
    stepped by the undefined name ``snake_case_`` instead of ``i``, and the
    0/1 sentinel assignments lost their targets.
    """
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1  # 0 is not prime
    primality_list[1] = 1  # 1 is not prime
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # mark every multiple of i starting at i*i as composite
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    # Fixed: previously printed f"{solution() = }" — a NameError, since the
    # function in this file is bound to A__.
    print(f'''{A__() = }''')
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _lowerCamelCase(unittest.TestCase):
    """Tests that ``get_activation`` returns the expected torch modules and
    that each activation saturates to 0 for large negative inputs while
    passing large positive inputs through (approximately) unchanged.

    Fixes to the obfuscated original: all four methods were named
    ``UpperCamelCase_`` (each shadowing the previous, and none discoverable
    by unittest), ``lowerCAmelCase`` was an undefined name where the
    activation module was meant, and ``torch.floataa`` does not exist
    (restored to ``torch.float32``).
    """

    def test_swish(self) -> None:
        act = get_activation('''swish''')
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self) -> None:
        act = get_activation('''silu''')
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self) -> None:
        act = get_activation('''mish''')
        self.assertIsInstance(act, nn.Mish)
        # mish needs a deeper negative probe than silu to underflow to 0
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self) -> None:
        act = get_activation('''gelu''')
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 64 | 1 |
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move ``height`` disks from ``from_pole`` to ``to_pole``
    using ``with_pole`` as scratch space (Tower of Hanoi).

    Fixed: all three functions here were bound to the placeholder name
    ``A__`` (with duplicate ``snake_case_`` parameters — a SyntaxError)
    while the call sites already used move_tower / move_disk / main.
    """
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    """Print a single disk move."""
    print('''moving disk from''', fp, '''to''', tp)


def main():
    """Read the tower height from stdin and solve A -> B with scratch C."""
    height = int(input('''Height of hanoi: ''').strip())
    move_tower(height, '''A''', '''B''', '''C''')


if __name__ == "__main__":
    main()
| 64 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ : Tuple = TypeVar('T')
class _lowerCamelCase ( Generic[T] ):
def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: Any | T= None
SCREAMING_SNAKE_CASE__: int= len(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: list[T]= [any_type for _ in range(self.N )] + arr
SCREAMING_SNAKE_CASE__: List[Any]= fnc
self.build()
def UpperCamelCase_ ( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
p += self.N
SCREAMING_SNAKE_CASE__: Union[str, Any]= v
while p > 1:
SCREAMING_SNAKE_CASE__: Any= p // 2
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> T | None: # noqa: E741
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= l + self.N, r + self.N
SCREAMING_SNAKE_CASE__: T | None= None
while l <= r:
if l % 2 == 1:
SCREAMING_SNAKE_CASE__: str= self.st[l] if res is None else self.fn(lowerCAmelCase , self.st[l] )
if r % 2 == 0:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.st[r] if res is None else self.fn(lowerCAmelCase , self.st[r] )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
    from functools import reduce

    # Fixed: the harness referenced undefined names (``SegmentTree``,
    # ``test_all_segments``, ``snake_case_``) and its constants were all
    # bound to the same name ``lowercase_``; restored working bindings
    # against the segment-tree class above (bound to ``_lowerCamelCase``).
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    # One tree per associative operation over the same backing array.
    min_segment_tree = _lowerCamelCase(test_array, min)
    max_segment_tree = _lowerCamelCase(test_array, max)
    sum_segment_tree = _lowerCamelCase(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Compare every (i, j) tree query against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    # Apply point updates, then verify all ranges again.
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 64 | 1 |
from __future__ import annotations
from collections import deque
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: list[dict]= []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCAmelCase )
self.set_fail_transitions()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: str= 0
for character in keyword:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= next_state
self.adlist[current_state]["output"].append(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> None:
SCREAMING_SNAKE_CASE__: deque= deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= 0
while q:
SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
lowerCAmelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
SCREAMING_SNAKE_CASE__: str= (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
for i in range(len(lowerCAmelCase ) ):
while (
self.find_next_state(lowerCAmelCase , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__: List[Any]= 0
else:
SCREAMING_SNAKE_CASE__: Dict= next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__: Optional[Any]= []
result[key].append(i - len(lowerCAmelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Fast tests for a single-ControlNet StableDiffusionControlNetImg2ImgPipeline.

    NOTE(review): obfuscation artifacts — the five class attributes are all
    named ``__a`` (each rebinding shadows the previous; only the last value
    survives) and every method is named ``UpperCamelCase_`` (only the last
    definition survives, and none is discoverable by unittest). Confirm the
    original attribute/method names before relying on this class.
    """
    __a = StableDiffusionControlNetImgaImgPipeline
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def UpperCamelCase_ ( self ) -> str:
        # Build a tiny, deterministic component set (seeded before each
        # construction) cheap enough for CPU CI: toy UNet, ControlNet, DDIM
        # scheduler, VAE and a random CLIP text stack; safety checker disabled.
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: str= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        # NOTE(review): ``clip_sample=lowerCAmelCase`` / ``set_alpha_to_one=lowerCAmelCase``
        # reference an undefined name; the original passed booleans — confirm.
        SCREAMING_SNAKE_CASE__: str= DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[str]= AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[Any]= CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTextModel(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # NOTE(review): dict values (unet, controlnet, ...) are unbound here
        # because every construction above assigned to SCREAMING_SNAKE_CASE__.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]:
        # Deterministic dummy inputs: seeded generator (mps needs the global
        # torch seed), a random control image at the ControlNet scale factor,
        # and a 64x64 RGB init image derived from a random tensor.
        if str(lowerCAmelCase ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.manual_seed(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= 2
        SCREAMING_SNAKE_CASE__: Tuple= randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , )
        SCREAMING_SNAKE_CASE__: int= floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__: str= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        SCREAMING_SNAKE_CASE__: Tuple= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def UpperCamelCase_ ( self ) -> Tuple:
        # Attention slicing must match the unsliced forward pass.
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase_ ( self ) -> Dict:
        # xFormers memory-efficient attention must match default attention.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def UpperCamelCase_ ( self ) -> str:
        # Batched inference must match single-sample inference.
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Fast tests for StableDiffusionControlNetImg2ImgPipeline driven by a
    MultiControlNetModel (two ControlNets).

    NOTE(review): obfuscation artifacts as in the class above — duplicate
    ``__a`` class attributes and all methods named ``UpperCamelCase_``
    (only the last survives; unittest cannot discover any of them).
    """
    __a = StableDiffusionControlNetImgaImgPipeline
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def UpperCamelCase_ ( self ) -> Dict:
        # Tiny deterministic components; two ControlNets whose down-block
        # projections get re-initialized so their outputs differ.
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        # NOTE(review): the body references undefined ``m`` (the original
        # parameter name) and ``torch.nn.Convad`` is a garbling of Conv2d —
        # confirm against the original test module.
        def init_weights(lowerCAmelCase ):
            if isinstance(lowerCAmelCase , torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        SCREAMING_SNAKE_CASE__: Any= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
        torch.manual_seed(0 )
        # NOTE(review): boolean scheduler flags were replaced by the
        # undefined name ``lowerCAmelCase`` — confirm original values.
        SCREAMING_SNAKE_CASE__: Tuple= DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Optional[int]= CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE__: Any= CLIPTextModel(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # Wrap both ControlNets so the pipeline exercises the multi path.
        SCREAMING_SNAKE_CASE__: Dict= MultiControlNetModel([controlneta, controlneta] )
        SCREAMING_SNAKE_CASE__: int= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]:
        # Deterministic dummy inputs with one control image per ControlNet.
        if str(lowerCAmelCase ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE__: str= torch.manual_seed(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= 2
        SCREAMING_SNAKE_CASE__: Tuple= [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
        ]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        SCREAMING_SNAKE_CASE__: int= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def UpperCamelCase_ ( self ) -> List[Any]:
        # Run the pipeline with several control_guidance_start/end settings
        # and check that each produces a different output image.
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: str= self.pipeline_class(**lowerCAmelCase )
        pipe.to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= 10.0
        SCREAMING_SNAKE_CASE__: Any= 4
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: List[Any]= pipe(**lowerCAmelCase )[0]
        SCREAMING_SNAKE_CASE__: Tuple= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: int= pipe(**lowerCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: str= pipe(**lowerCAmelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: Any= pipe(**lowerCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
    def UpperCamelCase_ ( self ) -> int:
        # Attention slicing must match the unsliced forward pass.
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase_ ( self ) -> Dict:
        # xFormers memory-efficient attention must match default attention.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # Batched inference must match single-sample inference.
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # save_pretrained is expected to raise for Multi-ControlNet; tolerate
        # NotImplementedError and fail on anything else.
        SCREAMING_SNAKE_CASE__: Any= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pipeline_class(**lowerCAmelCase )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(lowerCAmelCase )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Optional[int]= ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
SCREAMING_SNAKE_CASE__: Tuple= StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase , controlnet=lowerCAmelCase )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[Any]= '''evil space-punk bird'''
SCREAMING_SNAKE_CASE__: List[str]= load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
SCREAMING_SNAKE_CASE__: List[Any]= load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
SCREAMING_SNAKE_CASE__: Optional[Any]= pipe(
lowerCAmelCase , lowerCAmelCase , control_image=lowerCAmelCase , generator=lowerCAmelCase , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
SCREAMING_SNAKE_CASE__: Union[str, Any]= output.images[0]
assert image.shape == (512, 512, 3)
SCREAMING_SNAKE_CASE__: str= load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9e-2
| 64 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ : Dict = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Union[str, Any] = ['PoolFormerFeatureExtractor']
lowercase_ : Optional[Any] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowercase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 64 | # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowerCamelCase :
__a = 42
# setable values
__a = 42
__a = 42
__a = None
@classmethod
def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
return cls(common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase )
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
__a = 42
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__a = [e.name for e in FlaxKarrasDiffusionSchedulers]
__a = 42
@property
def UpperCamelCase_ ( self ) -> List[Any]:
return True
@register_to_config
def __init__( self , lowerCAmelCase = 1000 , lowerCAmelCase = 0.0001 , lowerCAmelCase = 0.02 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "fixed_small" , lowerCAmelCase = True , lowerCAmelCase = "epsilon" , lowerCAmelCase = jnp.floataa , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Optional[int]= dtype
def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> DDPMSchedulerState:
if common is None:
SCREAMING_SNAKE_CASE__: Optional[Any]= CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE__: Dict= jnp.array(1.0 , dtype=self.dtype )
SCREAMING_SNAKE_CASE__: int= jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase , )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None ) -> jnp.ndarray:
return sample
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = () ) -> DDPMSchedulerState:
SCREAMING_SNAKE_CASE__: str= self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE__: str= (jnp.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase , )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[str]:
SCREAMING_SNAKE_CASE__: Tuple= state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__: int= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
SCREAMING_SNAKE_CASE__: int= (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
SCREAMING_SNAKE_CASE__: Dict= jnp.clip(lowerCAmelCase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
SCREAMING_SNAKE_CASE__: str= jnp.log(jnp.clip(lowerCAmelCase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
SCREAMING_SNAKE_CASE__: Union[str, Any]= state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
SCREAMING_SNAKE_CASE__: Optional[Any]= jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
SCREAMING_SNAKE_CASE__: List[Any]= variance
SCREAMING_SNAKE_CASE__: Any= state.common.betas[t]
SCREAMING_SNAKE_CASE__: List[Any]= (predicted_variance + 1) / 2
SCREAMING_SNAKE_CASE__: Optional[Any]= frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= timestep
if key is None:
SCREAMING_SNAKE_CASE__: Optional[Any]= jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= jnp.split(lowerCAmelCase , sample.shape[1] , axis=1 )
else:
SCREAMING_SNAKE_CASE__: Any= None
# 1. compute alphas, betas
SCREAMING_SNAKE_CASE__: List[Any]= state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__: Optional[int]= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE__: Optional[int]= 1 - alpha_prod_t
SCREAMING_SNAKE_CASE__: str= 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE__: Dict= (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE__: str= model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE__: Tuple= (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE__: Any= jnp.clip(lowerCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE__: int= (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
SCREAMING_SNAKE_CASE__: Any= state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE__: Dict= pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
SCREAMING_SNAKE_CASE__: int= jax.random.split(lowerCAmelCase , num=1 )
SCREAMING_SNAKE_CASE__: str= jax.random.normal(lowerCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(lowerCAmelCase , lowerCAmelCase , predicted_variance=lowerCAmelCase ) ** 0.5) * noise
SCREAMING_SNAKE_CASE__: Union[str, Any]= jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE__: Optional[int]= pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase , state=lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
return add_noise_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
return get_velocity_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
| 64 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase_ : str = 1_6
lowercase_ : List[Any] = 3_2
def A__ ( snake_case_ : Accelerator , snake_case_ : int = 16 ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__: str= load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case_ : str ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__: Optional[Any]= tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__: Optional[Any]= datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__: Dict= tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case_ : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__: Optional[int]= 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__: Optional[int]= 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__: Optional[int]= 8
else:
SCREAMING_SNAKE_CASE__: str= None
return tokenizer.pad(
snake_case_ , padding='''longest''' , max_length=snake_case_ , pad_to_multiple_of=snake_case_ , return_tensors='''pt''' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__: Tuple= DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
SCREAMING_SNAKE_CASE__: Tuple= DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase_ : int = mocked_dataloaders # noqa: F811
def A__ ( snake_case_ : List[str] , snake_case_ : int ):
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , snake_case_ ) == "1":
SCREAMING_SNAKE_CASE__: Union[str, Any]= 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
SCREAMING_SNAKE_CASE__: int= Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
SCREAMING_SNAKE_CASE__: List[str]= Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__: List[str]= config['''lr''']
SCREAMING_SNAKE_CASE__: Optional[int]= int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE__: Optional[Any]= int(config['''seed'''] )
SCREAMING_SNAKE_CASE__: Union[str, Any]= int(config['''batch_size'''] )
set_seed(snake_case_ )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= get_dataloaders(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE__: int= evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE__: Optional[int]= 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE__: Any= batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE__: Tuple= MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__: Optional[Any]= AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=snake_case_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__: Optional[int]= model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__: Tuple= AdamW(params=model.parameters() , lr=snake_case_ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__: Union[str, Any]= get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=100 , num_training_steps=(len(snake_case_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[Any]= accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
SCREAMING_SNAKE_CASE__: Optional[Any]= os.path.split(snake_case_ )[-1].split('''.''' )[0]
accelerator.init_trackers(snake_case_ , snake_case_ )
# Now we train the model
for epoch in range(snake_case_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
SCREAMING_SNAKE_CASE__: List[Any]= 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE__: List[Any]= model(**snake_case_ )
SCREAMING_SNAKE_CASE__: Any= outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
SCREAMING_SNAKE_CASE__: List[Any]= loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__: List[Any]= model(**snake_case_ )
SCREAMING_SNAKE_CASE__: Dict= outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
SCREAMING_SNAKE_CASE__: Optional[Any]= metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , snake_case_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(snake_case_ ),
'''epoch''': epoch,
} , step=snake_case_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def A__ ( ):
SCREAMING_SNAKE_CASE__: Any= argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=snake_case_ , default=snake_case_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=snake_case_ , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , )
SCREAMING_SNAKE_CASE__: Optional[int]= parser.parse_args()
SCREAMING_SNAKE_CASE__: int= {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main()
| 64 | def A__ ( snake_case_ : int ):
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
SCREAMING_SNAKE_CASE__: List[Any]= [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE__: List[str]= 1
if upper_limit > 0:
SCREAMING_SNAKE_CASE__: List[str]= 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(snake_case_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
lowercase_ : Any = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 64 | 1 |
def A__ ( snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[str] ):
if index == r:
for j in range(snake_case_ ):
print(data[j] , end=''' ''' )
print(''' ''' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
SCREAMING_SNAKE_CASE__: Union[str, Any]= arr[i]
combination_util(snake_case_ , snake_case_ , snake_case_ , index + 1 , snake_case_ , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def A__ ( snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Tuple ):
# A temporary array to store all combination one by one
SCREAMING_SNAKE_CASE__: int= [0] * r
# Print all combination using temporary array 'data[]'
combination_util(snake_case_ , snake_case_ , snake_case_ , 0 , snake_case_ , 0 )
if __name__ == "__main__":
# Driver code to check the function above
lowercase_ : List[Any] = [1_0, 2_0, 3_0, 4_0, 5_0]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 64 | from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 64 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self , lowerCAmelCase ) -> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
SCREAMING_SNAKE_CASE__: List[str]= model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Any:
SCREAMING_SNAKE_CASE__: str= '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__: List[Any]= PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: Tuple= PyTorchBenchmark(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: List[Any]= '''sgugger/tiny-distilbert-classification'''
SCREAMING_SNAKE_CASE__: Optional[int]= PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , only_pretrain_model=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: str= PyTorchBenchmark(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: int= '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__: Optional[Any]= PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , torchscript=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: Any= PyTorchBenchmark(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Dict= '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__: List[Any]= PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , fpaa=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: List[Any]= PyTorchBenchmark(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__: List[Any]= '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__: str= AutoConfig.from_pretrained(lowerCAmelCase )
# set architectures equal to `None`
SCREAMING_SNAKE_CASE__: Optional[Any]= None
SCREAMING_SNAKE_CASE__: Tuple= PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: int= PyTorchBenchmark(lowerCAmelCase , configs=[config] )
SCREAMING_SNAKE_CASE__: Optional[int]= benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: int= '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__: int= PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: List[Any]= PyTorchBenchmark(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: int= '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__: Any= PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowerCAmelCase , multi_process=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: Optional[int]= PyTorchBenchmark(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[int]= '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__: List[Any]= AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: int= PyTorchBenchmark(lowerCAmelCase , configs=[config] )
SCREAMING_SNAKE_CASE__: List[Any]= benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: str= '''sshleifer/tinier_bart'''
SCREAMING_SNAKE_CASE__: List[str]= AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: str= PyTorchBenchmark(lowerCAmelCase , configs=[config] )
SCREAMING_SNAKE_CASE__: Any= benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Any= '''sshleifer/tiny-gpt2'''
SCREAMING_SNAKE_CASE__: int= AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: Any= PyTorchBenchmark(lowerCAmelCase , configs=[config] )
SCREAMING_SNAKE_CASE__: List[str]= benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: int= '''sshleifer/tinier_bart'''
SCREAMING_SNAKE_CASE__: int= AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: List[str]= PyTorchBenchmark(lowerCAmelCase , configs=[config] )
SCREAMING_SNAKE_CASE__: List[str]= benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase_(self) -> None:
    """Benchmark with CSV output enabled: all five CSV files (inference/train
    time and memory, plus environment info) must exist afterwards.

    NOTE(review): undefined names (`MODEL_ID`, `tmp_dir`, `benchmark`,
    `lowerCAmelCase`) fixed; flags reconstructed from the upstream test
    (training=True, inference=True, save_to_csv=True).
    """
    model_id = "sshleifer/tiny-gpt2"
    with tempfile.TemporaryDirectory() as tmp_dir:
        benchmark_args = PyTorchBenchmarkArguments(
            models=[model_id],
            training=True,
            inference=True,
            save_to_csv=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
            train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
            inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
            train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
            env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        benchmark.run()
        # Each configured CSV target must have been written.
        self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
        self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
        self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
        self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
        self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
def UpperCamelCase_(self) -> None:
    """Benchmark with line-by-line memory tracing: inference and train
    summaries must expose the four tracing attributes and the log file must
    be written.

    NOTE(review): undefined names (`MODEL_ID`, `tmp_dir`, `benchmark`,
    `result`, `lowerCAmelCase`) fixed; flags reconstructed from the upstream
    test (training=True, inference=True, log_print=True,
    trace_memory_line_by_line=True).
    """
    model_id = "sshleifer/tiny-gpt2"

    def _check_summary_is_not_empty(summary):
        # A populated MemorySummary carries these four sections.
        self.assertTrue(hasattr(summary, "sequential"))
        self.assertTrue(hasattr(summary, "cumulative"))
        self.assertTrue(hasattr(summary, "current"))
        self.assertTrue(hasattr(summary, "total"))

    with tempfile.TemporaryDirectory() as tmp_dir:
        benchmark_args = PyTorchBenchmarkArguments(
            models=[model_id],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            log_filename=os.path.join(tmp_dir, "log.txt"),
            log_print=True,
            trace_memory_line_by_line=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        result = benchmark.run()
        _check_summary_is_not_empty(result.inference_summary)
        _check_summary_is_not_empty(result.train_summary)
        self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 64 | import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq/fairseq OPT state dict and normalize it for transformers.

    Steps: unwrap the ``"model"`` sub-dict if present, drop weights that have
    no transformers counterpart, rename a few mismatched keys, and split each
    fused ``qkv_proj`` matrix into separate q/k/v projections.

    NOTE(review): the function was defined as ``A__`` but called below as
    ``load_checkpoint``; locals were bound to throwaway names and then read
    as ``sd``/``value`` — both restored here.

    :param checkpoint_path: path to the saved checkpoint (``torch.save`` file).
    :return: the cleaned state dict, ready for ``OPTModel.load_state_dict``.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq OPT checkpoint into a saved transformers ``OPTModel``.

    NOTE(review): the original def repeated the parameter name ``snake_case_``
    (a SyntaxError) and read undefined locals ``model``/``config``; it is also
    invoked below as ``convert_opt_checkpoint`` while being defined as ``A__``
    — all reconciled here.

    :param checkpoint_path: path to the fairseq/metaseq checkpoint file.
    :param pytorch_dump_folder_path: output directory (created if missing).
    :param config: optional path/name of an ``OPTConfig`` to load; a default
        config is used when omitted.
    """
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    # Model is stored/served in half precision; eval() disables dropout.
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
lowercase_ : int = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 64 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Any = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    """Build a ``DPTConfig`` (and the expected output shape) for a checkpoint URL.

    "large" checkpoints get the ViT-large backbone hyper-parameters; "ade"
    checkpoints are semantic-segmentation heads with ADE20K labels.

    NOTE(review): every assignment target was mangled in the original; the
    attribute names below are reconstructed from the upstream conversion
    script — confirm against it before shipping. As in the upstream script,
    ``expected_shape`` is unbound when the URL matches neither branch.

    :param checkpoint_url: URL of the original DPT checkpoint.
    :return: ``(config, expected_shape)``.
    """
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        # Map ADE20K class ids to human-readable labels from the hub dataset.
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """Drop timm-only classification-head weights from ``state_dict`` in place.

    These keys have no counterpart in the transformers DPT model. Missing keys
    are ignored (``pop`` with a default), so the call is idempotent.

    NOTE(review): restored the parameter name (the body read an undefined
    ``state_dict``) and the name used at the call site below.
    """
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    """Translate one original DPT/timm state-dict key into its transformers name.

    Applies a sequence of substring rewrites (backbone, neck, fusion stage,
    readout/resize blocks, heads). Order matters: earlier rewrites feed later
    checks.

    NOTE(review): in the original every ``name.replace`` result was assigned
    to a throwaway variable, so the function returned its input unchanged —
    each replacement now rebinds ``name``.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query/key/value entries.

    timm stores attention input projections as one matrix + bias per layer;
    transformers expects separate q/k/v weights. Mutates ``state_dict`` in
    place, popping the fused keys and inserting the split ones.

    NOTE(review): the destination key names were lost in the original
    (assigned to throwaway variables); reconstructed from the upstream
    conversion script.

    :param state_dict: renamed checkpoint dict (keys already ``dpt.encoder.*``).
    :param config: object exposing ``num_hidden_layers`` and ``hidden_size``.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Download and return the standard COCO cats image used to sanity-check
    converted vision models.

    NOTE(review): the URL was bound to a throwaway name and then read as an
    undefined variable — fixed; performs network I/O via ``requests``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Convert an original DPT checkpoint into a transformers model directory.

    Downloads the state dict, cleans/renames its keys, splits qkv matrices,
    sanity-checks model outputs on a test image against known slices, saves
    the model + image processor, and optionally pushes both to the hub.

    NOTE(review): the original def repeated the parameter name ``snake_case_``
    (a SyntaxError) and read many undefined locals (``state_dict``, ``model``,
    ``image_processor``, ``outputs``, ...); reconstructed from the upstream
    conversion script.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1E-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing model to hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowercase_ : List[Any] = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
def A__(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid, in m/s.

    Uses the Newton-Laplace formula ``c = sqrt(K / rho)`` where ``K`` is the
    fluid's bulk modulus (Pa) and ``rho`` its density (kg/m^3).

    NOTE(review): the original signature repeated the parameter name
    ``snake_case_`` twice (a SyntaxError) while the body read ``density`` and
    ``bulk_modulus`` — the intended parameter names are restored.

    :param density: density of the fluid in kg/m^3; must be positive.
    :param bulk_modulus: bulk modulus of the fluid in Pa; must be positive.
    :raises ValueError: if either argument is zero or negative.

    >>> A__(1.0, 4.0)
    2.0
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 1 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level tokenizer constants.
# NOTE(review): all four values were bound to `lowercase_` (each rebinding
# shadowing the last) while the class below reads `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` — the intended names are restored.
logger = logging.get_logger(__name__)

# Name of the SentencePiece model file inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Hub URLs of the SentencePiece model per published GPT-SW3 checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

# Maximum model input length (tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2_048,
    "AI-Sweden/gpt-sw3-350m": 2_048,
    "AI-Sweden/gpt-sw3-1.6b": 2_048,
    "AI-Sweden/gpt-sw3-6.7b": 2_048,
    "AI-Sweden/gpt-sw3-20b": 2_048,
}
class _lowerCamelCase ( UpperCamelCase_ ):
    """
    GPT-SW3 tokenizer backed by a SentencePiece model: normalizes whitespace
    and strips non-printing characters before encoding, and provides save /
    fast-encode / fast-decode / conversation-prompt helpers.

    NOTE(review): this block is heavily name-mangled — locals are bound to
    `SCREAMING_SNAKE_CASE__` but read back under their original names
    (`name_or_path`, `eos_token`, ...), the four class attributes are all
    bound to `__a` (each rebinding shadowing the previous), and most method
    parameters are all named `lowerCAmelCase`. Comments below describe the
    apparent intent; the code itself needs reconstruction before it can run.
    """
    # NOTE(review): presumably VOCAB_FILES_NAMES -> vocab_files_names,
    # pretrained maps, max sizes, and model_input_names in the original.
    __a = VOCAB_FILES_NAMES
    __a = PRETRAINED_VOCAB_FILES_MAP
    __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a = ["input_ids", "attention_mask"]
    def __init__( self , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase = None , **lowerCAmelCase , ) -> None:
        # Default to an empty kwargs dict for the SentencePiece processor.
        SCREAMING_SNAKE_CASE__: Optional[Any]= {} if sp_model_kwargs is None else sp_model_kwargs
        SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored''' )
            SCREAMING_SNAKE_CASE__: Dict= '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        SCREAMING_SNAKE_CASE__: List[Any]= '''<|endoftext|>''' if eos_token is None else eos_token
        SCREAMING_SNAKE_CASE__: List[Any]= '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            # The 7b checkpoint reuses unk as pad and eos as bos.
            SCREAMING_SNAKE_CASE__: List[str]= unk_token if pad_token is None else pad_token
            SCREAMING_SNAKE_CASE__: Dict= eos_token if bos_token is None else bos_token
        else:
            SCREAMING_SNAKE_CASE__: Optional[Any]= '''<pad>''' if pad_token is None else pad_token
            SCREAMING_SNAKE_CASE__: List[Any]= '''<s>''' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=lowerCAmelCase , remove_space=lowerCAmelCase , keep_accents=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= do_lower_case
        SCREAMING_SNAKE_CASE__: int= remove_space
        SCREAMING_SNAKE_CASE__: List[str]= keep_accents
        SCREAMING_SNAKE_CASE__: int= vocab_file
        # Load the SentencePiece model from the given vocab file.
        SCREAMING_SNAKE_CASE__: int= spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCAmelCase )
        # Used for whitespace normalization in input texts
        # fmt : off
        SCREAMING_SNAKE_CASE__: Optional[int]= {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt : on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        SCREAMING_SNAKE_CASE__: str= re.compile(
            f'[{"".join(map(lowerCAmelCase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' )
    def __getstate__( self ) -> Union[str, Any]:
        # Exclude the unpicklable SentencePiece processor from pickling.
        SCREAMING_SNAKE_CASE__: Any= self.__dict__.copy()
        SCREAMING_SNAKE_CASE__: Dict= None
        return state
    def __setstate__( self , lowerCAmelCase ) -> List[Any]:
        # Restore state and reload the SentencePiece model from the vocab file.
        SCREAMING_SNAKE_CASE__: List[Any]= d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            SCREAMING_SNAKE_CASE__: Union[str, Any]= {}
        SCREAMING_SNAKE_CASE__: Tuple= spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def UpperCamelCase_ ( self ) -> int:
        # Vocabulary size equals the SentencePiece model size.
        return len(self.sp_model )
    def UpperCamelCase_ ( self ) -> str:
        # Preprocess text: strip non-printing characters, normalize
        # whitespace variants to plain spaces, then NFC-normalize.
        SCREAMING_SNAKE_CASE__: Any= self.non_printing_characters_re.sub('''''' , lowerCAmelCase )
        # Normalize whitespaces
        SCREAMING_SNAKE_CASE__: Any= ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        SCREAMING_SNAKE_CASE__: Optional[Any]= unicodedata.normalize('''NFC''' , lowerCAmelCase )
        return text
    def UpperCamelCase_ ( self , lowerCAmelCase , **lowerCAmelCase ) -> List[str]:
        # Tokenize: preprocess then encode with SentencePiece.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.preprocess_text(lowerCAmelCase )
        return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
        # Token string -> vocabulary id.
        return self.sp_model.PieceToId(lowerCAmelCase )
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> str:
        # Vocabulary id -> token string.
        return self.sp_model.IdToPiece(lowerCAmelCase )
    @staticmethod
    def UpperCamelCase_ ( lowerCAmelCase ) -> str:
        # Identity pass-through (no extra cleanup of the decoded string).
        return out_string
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> str:
        # Join tokens into a string, decoding special tokens separately so
        # they are not run through the SentencePiece model.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= []
        SCREAMING_SNAKE_CASE__: Optional[int]= ''''''
        SCREAMING_SNAKE_CASE__: Any= False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(lowerCAmelCase ) + token
                SCREAMING_SNAKE_CASE__: Union[str, Any]= True
                SCREAMING_SNAKE_CASE__: Tuple= []
            else:
                current_sub_tokens.append(lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: int= False
        out_string += self.sp_model.decode(lowerCAmelCase )
        return out_string
    def UpperCamelCase_ ( self ) -> Dict[str, int]:
        # Full vocabulary mapping, including any added tokens.
        SCREAMING_SNAKE_CASE__: List[Any]= {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = None ) -> Tuple[str]:
        # Save the SentencePiece model file into `save_directory`, either by
        # copying the original file or serializing the in-memory model.
        if not os.path.isdir(lowerCAmelCase ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        SCREAMING_SNAKE_CASE__: Optional[int]= os.path.join(
            lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCAmelCase , '''wb''' ) as fi:
                SCREAMING_SNAKE_CASE__: Optional[Any]= self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase )
        return (out_vocab_file,)
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        # Fast encode: preprocess + SentencePiece encode for a string or a
        # list of strings, optionally returning a torch tensor.
        if isinstance(lowerCAmelCase , lowerCAmelCase ):
            SCREAMING_SNAKE_CASE__: List[Any]= self.preprocess_text(lowerCAmelCase )
            SCREAMING_SNAKE_CASE__: Optional[int]= self.sp_model.encode(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Dict= [self.preprocess_text(lowerCAmelCase ) for t in text]
            SCREAMING_SNAKE_CASE__: Optional[Any]= self.sp_model.encode(lowerCAmelCase )
        if return_tensors is True or return_tensors == "pt":
            SCREAMING_SNAKE_CASE__: Dict= torch.tensor(lowerCAmelCase )
        return token_ids
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> str:
        # Fast decode: straight through the SentencePiece model.
        return self.sp_model.decode(lowerCAmelCase )
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> List[int]:
        # Build a chat prompt from a Conversation: alternating "User:"/"Bot:"
        # turns separated by bos/eos tokens, ending with a "Bot:" cue.
        SCREAMING_SNAKE_CASE__: Tuple= [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
        SCREAMING_SNAKE_CASE__: int= (
            f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowerCAmelCase ) + f'{self.bos_token}Bot:'
        )
        return self.encode(text=lowerCAmelCase )
| 64 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the BLIP-2 sub-package: names are only imported
# when first accessed. NOTE(review): the structure dict was bound to
# `lowercase_` while `_LazyModule` read `_import_structure`, the torch-only
# list was never attached to it, and the class/module names were mangled to
# `Blipa*`/`blip_a` — all restored to the conventional layout here.
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code needs torch; expose only configs/processor without it.
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy at runtime.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase_ : int = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
__a = "AutoTokenizer"
__a = ["tokenizer"]
__a = {
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self , lowerCAmelCase , lowerCAmelCase=None ) -> str:
super().__init__(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= speaker_embeddings
@classmethod
def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase="speaker_embeddings_path.json" , **lowerCAmelCase ) -> List[Any]:
if speaker_embeddings_dict_path is not None:
SCREAMING_SNAKE_CASE__: Dict= get_file_from_repo(
lowerCAmelCase , lowerCAmelCase , subfolder=kwargs.pop('''subfolder''' , lowerCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCAmelCase ) , force_download=kwargs.pop('''force_download''' , lowerCAmelCase ) , proxies=kwargs.pop('''proxies''' , lowerCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , lowerCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCAmelCase ) , revision=kwargs.pop('''revision''' , lowerCAmelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
f'`{os.path.join(lowerCAmelCase , lowerCAmelCase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
SCREAMING_SNAKE_CASE__: Dict= None
else:
with open(lowerCAmelCase ) as speaker_embeddings_json:
SCREAMING_SNAKE_CASE__: List[Any]= json.load(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Optional[Any]= None
SCREAMING_SNAKE_CASE__: Union[str, Any]= AutoTokenizer.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
return cls(tokenizer=lowerCAmelCase , speaker_embeddings=lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase="speaker_embeddings_path.json" , lowerCAmelCase="speaker_embeddings" , lowerCAmelCase = False , **lowerCAmelCase , ) -> List[Any]:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowerCAmelCase , lowerCAmelCase , '''v2''' ) , exist_ok=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= {}
SCREAMING_SNAKE_CASE__: Optional[int]= save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
SCREAMING_SNAKE_CASE__: Tuple= self._load_voice_preset(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , lowerCAmelCase , f'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: List[Any]= os.path.join(lowerCAmelCase , f'{prompt_key}_{key}.npy' )
SCREAMING_SNAKE_CASE__: Dict= tmp_dict
with open(os.path.join(lowerCAmelCase , lowerCAmelCase ) , '''w''' ) as fp:
json.dump(lowerCAmelCase , lowerCAmelCase )
super().save_pretrained(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase = None , **lowerCAmelCase ) -> Tuple:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.speaker_embeddings[voice_preset]
SCREAMING_SNAKE_CASE__: Any= {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
SCREAMING_SNAKE_CASE__: List[Any]= get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , lowerCAmelCase ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCAmelCase ) , force_download=kwargs.pop('''force_download''' , lowerCAmelCase ) , proxies=kwargs.pop('''proxies''' , lowerCAmelCase ) , resume_download=kwargs.pop('''resume_download''' , lowerCAmelCase ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCAmelCase ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCAmelCase ) , revision=kwargs.pop('''revision''' , lowerCAmelCase ) , )
if path is None:
raise ValueError(
f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
SCREAMING_SNAKE_CASE__: int= np.load(lowerCAmelCase )
return voice_preset_dict
def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> Any:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self , text=None , voice_preset=None , return_tensors='''pt''' , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs , ) -> List[Any]:
    """Tokenize ``text`` and optionally attach a voice preset as ``history_prompt``.

    ``voice_preset`` may be a dict of prompt arrays, a known preset name, or a
    path to an ``.npz`` file. Returns the tokenizer's BatchFeature with an extra
    ``history_prompt`` entry when a preset was given.

    NOTE(review): the original signature declared seven parameters all named
    ``lowerCAmelCase`` (a SyntaxError); names were restored from the defaults
    and the body's usage. The sibling helpers are called as
    ``_load_voice_preset`` / ``_validate_voice_preset_dict`` as in the original
    body, although those methods appear renamed elsewhere in this file — confirm.
    """
    if voice_preset is not None and not isinstance(voice_preset , dict ):
        if (
            isinstance(voice_preset , str )
            and self.speaker_embeddings is not None
            and voice_preset in self.speaker_embeddings
        ):
            # Known preset name: fetch its arrays from the speaker-embeddings repo.
            voice_preset = self._load_voice_preset(voice_preset )
        else:
            # Otherwise treat it as a (possibly extension-less) .npz file path.
            if isinstance(voice_preset , str ) and not voice_preset.endswith('''.npz''' ):
                voice_preset = voice_preset + '''.npz'''
            voice_preset = np.load(voice_preset )
    if voice_preset is not None:
        self._validate_voice_preset_dict(voice_preset , **kwargs )
        voice_preset = BatchFeature(data=voice_preset , tensor_type=return_tensors )
    encoded_text = self.tokenizer(
        text , return_tensors=return_tensors , padding='''max_length''' , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )
    if voice_preset is not None:
        encoded_text['''history_prompt'''] = voice_preset
    return encoded_text
| 64 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class _lowerCamelCase ( TaskTemplate ):
    """Task template for text classification: maps a dataset's text/label columns
    onto the canonical ``text``/``labels`` schema.

    NOTE(review): the original declared all five class attributes as ``__a``
    (each assignment shadowing the previous) while the methods read
    ``self.label_column`` etc., both methods shared one mangled name, and the
    base class / ``frozen`` argument referenced the undefined ``UpperCamelCase_``;
    restored the attribute and method names the bodies actually use
    (``TaskTemplate`` is imported at the top of this file).
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features( self , features ) -> "_lowerCamelCase":
        """Return a copy of this template whose label schema uses the dataset's ClassLabel."""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        # Write through __dict__ because the dataclass is frozen.
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        """Mapping from the dataset's column names to the task's canonical names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 64 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def A__ ( snake_case_ : Any ):
    """Freeze every parameter of *snake_case_* (a torch.nn.Module) so it is
    excluded from gradient computation."""
    # The original iterated the undefined name `module` and assigned False to a
    # placeholder local; restored `requires_grad = False` on each parameter.
    for param in snake_case_.parameters():
        param.requires_grad = False
def A__ ( ):
    """Return the preferred torch device string: "cuda" if available, else "mps"
    (with a warning), else "cpu"."""
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    # NOTE(review): original torch versions may lack torch.backends.mps — confirm minimum torch.
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def A__ ( snake_case_ : str ):
    """Display the image *snake_case_* with matplotlib, hiding both axes."""
    fig = plt.imshow(snake_case_ )
    # Hide the tick axes; the original passed the image itself where False is intended.
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def A__ ( ):
    """Return the current wall-clock time formatted as HH:MM:SS."""
    # Originals assigned to placeholder locals but returned `timestamp`;
    # restored the intended variable names.
    current_time = datetime.now()
    timestamp = current_time.strftime('''%H:%M:%S''' )
    return timestamp
| 64 | import inspect
import unittest
class _lowerCamelCase ( unittest.TestCase ):
    """Checks that every backend named by a diffusers dummy object appears in the
    dependency-versions table.

    NOTE(review): both methods carry the same mangled name ``UpperCamelCase_``,
    so the second definition shadows the first; names kept to preserve the
    visible interface — confirm intended names upstream.
    """

    def UpperCamelCase_ ( self ) -> Any:
        # diffusers must be importable for the dependency check to be meaningful.
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def UpperCamelCase_ ( self ) -> List[str]:
        import diffusers
        from diffusers.dependency_versions_table import deps

        # Walk all classes; dummy modules record the backends they stand in for.
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Map import names back to the pip package names in the deps table.
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, f'{backend} is not in the deps table!'
| 64 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """Test-suite for the MobileBERT slow/fast tokenizers (via TokenizerTesterMixin).

    NOTE(review): throughout this class, locals are assigned to the placeholder
    name ``SCREAMING_SNAKE_CASE__`` while later statements read the originally
    intended names (``vocab_tokens``, ``tokenizer``, ``tokens``, ...), so the
    methods would raise NameError if executed as-is. Code is left byte-identical
    and only documented.
    """

    # Tokenizer classes and feature flags consumed by the shared tester mixin.
    __a = MobileBertTokenizer
    __a = MobileBertTokenizerFast
    __a = True
    __a = True
    __a = filter_non_english
    __a = "google/mobilebert-uncased"

    def UpperCamelCase_ ( self ) -> str:
        # Build a tiny WordPiece vocab on disk so tokenizer tests run offline.
        super().setUp()
        SCREAMING_SNAKE_CASE__: List[str]= [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        SCREAMING_SNAKE_CASE__: Any= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        SCREAMING_SNAKE_CASE__: str= [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> List[Any]:
        # Input/expected-output pair used by the mixin's generic round-trip tests.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= '''UNwant\u00E9d,running'''
        SCREAMING_SNAKE_CASE__: List[str]= '''unwanted, running'''
        return input_text, output_text

    def UpperCamelCase_ ( self ) -> List[Any]:
        # tokenize + convert_tokens_to_ids against the toy vocab written in setUp.
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.tokenizer_class(self.vocab_file )
        SCREAMING_SNAKE_CASE__: Tuple= tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(lowerCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )

    def UpperCamelCase_ ( self ) -> List[str]:
        # Slow vs fast (rust) tokenizer parity, with and without lower-casing.
        if not self.test_rust_tokenizer:
            return
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= '''UNwant\u00E9d,running'''
        SCREAMING_SNAKE_CASE__: Any= tokenizer.tokenize(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= rust_tokenizer.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: str= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE__: List[str]= tokenizer.encode(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= rust_tokenizer.encode(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        # With lower casing
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_tokenizer(do_lower_case=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= self.get_rust_tokenizer(do_lower_case=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= '''UNwant\u00E9d,running'''
        SCREAMING_SNAKE_CASE__: List[Any]= tokenizer.tokenize(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= rust_tokenizer.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE__: int= tokenizer.encode(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: str= rust_tokenizer.encode(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> Dict:
        # BasicTokenizer splits CJK characters into individual tokens.
        SCREAMING_SNAKE_CASE__: str= BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )

    def UpperCamelCase_ ( self ) -> Optional[int]:
        # Lower-casing behaviour of the BasicTokenizer.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= BasicTokenizer(do_lower_case=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def UpperCamelCase_ ( self ) -> List[Any]:
        # Lower-casing with accents preserved (strip_accents disabled).
        SCREAMING_SNAKE_CASE__: Dict= BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Lower-casing with accents stripped.
        SCREAMING_SNAKE_CASE__: Dict= BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def UpperCamelCase_ ( self ) -> Optional[int]:
        # Default accent handling when only do_lower_case is set.
        SCREAMING_SNAKE_CASE__: List[Any]= BasicTokenizer(do_lower_case=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Case preserved when lower-casing is off.
        SCREAMING_SNAKE_CASE__: Dict= BasicTokenizer(do_lower_case=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def UpperCamelCase_ ( self ) -> Tuple:
        # Case preserved and accents preserved.
        SCREAMING_SNAKE_CASE__: int= BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Case preserved, accents stripped.
        SCREAMING_SNAKE_CASE__: List[str]= BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )

    def UpperCamelCase_ ( self ) -> Any:
        # never_split keeps the listed tokens intact.
        SCREAMING_SNAKE_CASE__: Dict= BasicTokenizer(do_lower_case=lowerCAmelCase , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )

    def UpperCamelCase_ ( self ) -> Any:
        # WordPiece greedy longest-match-first behaviour, including unknown pieces.
        SCREAMING_SNAKE_CASE__: List[str]= ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        SCREAMING_SNAKE_CASE__: Union[str, Any]= {}
        for i, token in enumerate(lowerCAmelCase ):
            SCREAMING_SNAKE_CASE__: str= i
        SCREAMING_SNAKE_CASE__: Tuple= WordpieceTokenizer(vocab=lowerCAmelCase , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )

    def UpperCamelCase_ ( self ) -> Optional[int]:
        # Character-class helper: whitespace detection.
        self.assertTrue(_is_whitespace(''' ''' ) )
        self.assertTrue(_is_whitespace('''\t''' ) )
        self.assertTrue(_is_whitespace('''\r''' ) )
        self.assertTrue(_is_whitespace('''\n''' ) )
        self.assertTrue(_is_whitespace('''\u00A0''' ) )
        self.assertFalse(_is_whitespace('''A''' ) )
        self.assertFalse(_is_whitespace('''-''' ) )

    def UpperCamelCase_ ( self ) -> int:
        # Character-class helper: control-character detection.
        self.assertTrue(_is_control('''\u0005''' ) )
        self.assertFalse(_is_control('''A''' ) )
        self.assertFalse(_is_control(''' ''' ) )
        self.assertFalse(_is_control('''\t''' ) )
        self.assertFalse(_is_control('''\r''' ) )

    def UpperCamelCase_ ( self ) -> int:
        # Character-class helper: punctuation detection.
        self.assertTrue(_is_punctuation('''-''' ) )
        self.assertTrue(_is_punctuation('''$''' ) )
        self.assertTrue(_is_punctuation('''`''' ) )
        self.assertTrue(_is_punctuation('''.''' ) )
        self.assertFalse(_is_punctuation('''A''' ) )
        self.assertFalse(_is_punctuation(''' ''' ) )

    def UpperCamelCase_ ( self ) -> Dict:
        # Soft-hyphen-only inputs must tokenize to an empty list.
        SCREAMING_SNAKE_CASE__: Tuple= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(lowerCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )

    @slow
    def UpperCamelCase_ ( self ) -> Tuple:
        # build_inputs_with_special_tokens wraps sequences with [CLS] (101) / [SEP] (102).
        SCREAMING_SNAKE_CASE__: Optional[int]= self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
        SCREAMING_SNAKE_CASE__: Optional[Any]= tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Tuple= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # Offset mapping of the fast tokenizer must line up with the produced tokens.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                SCREAMING_SNAKE_CASE__: Union[str, Any]= self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: int= f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                SCREAMING_SNAKE_CASE__: Dict= tokenizer_r.encode_plus(
                    lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase , )
                SCREAMING_SNAKE_CASE__: Any= tokenizer_r.do_lower_case if hasattr(lowerCAmelCase , '''do_lower_case''' ) else False
                # Expected (offset, token) pairs differ between cased and uncased models.
                SCREAMING_SNAKE_CASE__: Optional[Any]= (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''A'''),
                        ((1, 2), ''','''),
                        ((3, 5), '''na'''),
                        ((5, 6), '''##ï'''),
                        ((6, 8), '''##ve'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''Allen'''),
                        ((21, 23), '''##NL'''),
                        ((23, 24), '''##P'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), '''a'''),
                        ((1, 2), ''','''),
                        ((3, 8), '''naive'''),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), '''allen'''),
                        ((21, 23), '''##nl'''),
                        ((23, 24), '''##p'''),
                        ((25, 33), '''sentence'''),
                        ((33, 34), '''.'''),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )

    def UpperCamelCase_ ( self ) -> Dict:
        # "##" continuation prefix on CJK chars only when tokenize_chinese_chars=False.
        SCREAMING_SNAKE_CASE__: Optional[Any]= ['''的''', '''人''', '''有''']
        SCREAMING_SNAKE_CASE__: List[Any]= ''''''.join(lowerCAmelCase )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                SCREAMING_SNAKE_CASE__: Dict= True
                SCREAMING_SNAKE_CASE__: Optional[Any]= self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: Optional[Any]= self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: Optional[int]= tokenizer_p.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: Tuple= tokenizer_r.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: Union[str, Any]= tokenizer_r.convert_ids_to_tokens(lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: Any= tokenizer_p.convert_ids_to_tokens(lowerCAmelCase )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
                self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: List[Any]= False
                SCREAMING_SNAKE_CASE__: Tuple= self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: Optional[int]= self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: int= tokenizer_r.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: Optional[Any]= tokenizer_p.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: Union[str, Any]= tokenizer_r.convert_ids_to_tokens(lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: Dict= tokenizer_p.convert_ids_to_tokens(lowerCAmelCase )
                # it is expected that only the first Chinese character is not preceded by "##".
                SCREAMING_SNAKE_CASE__: Dict= [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase )
                ]
                self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
                self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
| 64 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ] )
class _lowerCamelCase ( unittest.TestCase ):
    """SageMaker smdistributed model-parallel integration test: launches a GLUE
    fine-tuning job on a p3dn instance and checks runtime/accuracy/loss KPIs.

    NOTE(review): locals are assigned to the placeholder ``SCREAMING_SNAKE_CASE__``
    while later statements read the intended names (``smp_options``,
    ``estimator``, ``train_runtime`` ...); code left byte-identical and only
    documented.
    """

    def UpperCamelCase_ ( self ) -> Any:
        # Copy the example training script next to the test path before launching.
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=lowerCAmelCase , )
        assert hasattr(self , '''env''' )

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple:
        # configuration for running training on smdistributed Model Parallel
        SCREAMING_SNAKE_CASE__: Optional[Any]= {
            '''enabled''': True,
            '''processes_per_host''': 8,
        }
        SCREAMING_SNAKE_CASE__: Dict= {
            '''enabled''': True,
            '''parameters''': {
                '''microbatches''': 4,
                '''placement_strategy''': '''spread''',
                '''pipeline''': '''interleaved''',
                '''optimize''': '''speed''',
                '''partitions''': 4,
                '''ddp''': True,
            },
        }
        SCREAMING_SNAKE_CASE__: Optional[Any]= {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
        SCREAMING_SNAKE_CASE__: Dict= '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase , hyperparameters={
                **self.env.hyperparameters,
                '''model_name_or_path''': self.model_name_or_path,
                '''max_steps''': 500,
            } , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase , py_version='''py36''' , )

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
        # Export the CloudWatch metrics of a finished training job to CSV.
        TrainingJobAnalytics(lowerCAmelCase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )

    @parameterized.expand([(1,)] )
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
        # create estimator
        SCREAMING_SNAKE_CASE__: List[str]= self.create_estimator(lowerCAmelCase )
        # run training
        estimator.fit()
        # result dataframe
        SCREAMING_SNAKE_CASE__: Any= TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        SCREAMING_SNAKE_CASE__: List[Any]= (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCAmelCase )
| 64 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """BPE tokenizer tests for XLM, driven by the shared TokenizerTesterMixin.

    NOTE(review): locals are assigned to the placeholder ``SCREAMING_SNAKE_CASE__``
    while later statements read the intended names (``vocab_tokens``,
    ``tokenizer``, ``tokens`` ...); code left byte-identical and only documented.
    """

    __a = XLMTokenizer
    __a = False

    def UpperCamelCase_ ( self ) -> str:
        # Write a toy BPE vocab (json) and merges file into tmpdir for offline tests.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        SCREAMING_SNAKE_CASE__: List[Any]= [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''w</w>''',
            '''r</w>''',
            '''t</w>''',
            '''lo''',
            '''low''',
            '''er</w>''',
            '''low</w>''',
            '''lowest</w>''',
            '''newer</w>''',
            '''wider</w>''',
            '''<unk>''',
        ]
        SCREAMING_SNAKE_CASE__: Tuple= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        SCREAMING_SNAKE_CASE__: List[Any]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        SCREAMING_SNAKE_CASE__: List[Any]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(lowerCAmelCase ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(lowerCAmelCase ) )

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Any:
        # Input/expected-output pair used by the mixin's generic tests.
        SCREAMING_SNAKE_CASE__: Optional[Any]= '''lower newer'''
        SCREAMING_SNAKE_CASE__: Any= '''lower newer'''
        return input_text, output_text

    def UpperCamelCase_ ( self ) -> Optional[int]:
        # BPE tokenization and id conversion against the toy vocab/merges.
        SCREAMING_SNAKE_CASE__: Optional[int]= XLMTokenizer(self.vocab_file , self.merges_file )
        SCREAMING_SNAKE_CASE__: Tuple= '''lower'''
        SCREAMING_SNAKE_CASE__: Optional[Any]= ['''low''', '''er</w>''']
        SCREAMING_SNAKE_CASE__: Any= tokenizer.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= tokens + ['''<unk>''']
        SCREAMING_SNAKE_CASE__: str= [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )

    @slow
    def UpperCamelCase_ ( self ) -> Optional[int]:
        # XLM wraps a single sequence as <s> x </s> (ids 0/1) and pairs as <s> x </s> y </s>.
        SCREAMING_SNAKE_CASE__: Optional[int]= XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
        SCREAMING_SNAKE_CASE__: Tuple= tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: str= tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: str= tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 64 | import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    """Nightly GPU integration test for the legacy ONNX Stable-Diffusion inpaint
    pipeline, comparing the generated image to a stored reference array.

    NOTE(review): locals are assigned to the placeholder ``SCREAMING_SNAKE_CASE__``
    while later statements read the intended names (``options``, ``pipe``,
    ``output`` ...); code left byte-identical and only documented.
    """

    @property
    def UpperCamelCase_ ( self ) -> List[str]:
        # ONNX Runtime CUDA provider config: cap the GPU memory arena at 15 GB.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def UpperCamelCase_ ( self ) -> List[Any]:
        # Session options with memory-pattern optimization disabled (False flag).
        SCREAMING_SNAKE_CASE__: Dict= ort.SessionOptions()
        SCREAMING_SNAKE_CASE__: List[str]= False
        return options

    def UpperCamelCase_ ( self ) -> int:
        # Fetch the input image, its inpainting mask, and the expected output array.
        SCREAMING_SNAKE_CASE__: Dict= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        SCREAMING_SNAKE_CASE__: int= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        SCREAMING_SNAKE_CASE__: Tuple= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
        # using the PNDM scheduler by default
        SCREAMING_SNAKE_CASE__: Tuple= OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= '''A red cat sitting on a park bench'''
        # Seeded RandomState keeps the generation deterministic across runs.
        SCREAMING_SNAKE_CASE__: Optional[Any]= np.random.RandomState(0 )
        SCREAMING_SNAKE_CASE__: Any= pipe(
            prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowerCAmelCase , output_type='''np''' , )
        SCREAMING_SNAKE_CASE__: Any= output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
| 64 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCamelCase ( UpperCamelCase_ ):
    """Dataset reader that builds a (streaming or map-style) Dataset from JSON files.

    NOTE(review): locals are assigned to the placeholder ``SCREAMING_SNAKE_CASE__``
    while later statements read the intended names (``dataset`` ...); code left
    byte-identical and only documented.
    """

    def __init__( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> int:
        # Forward common reader options to the base class, then set up the Json builder.
        super().__init__(
            lowerCAmelCase , split=lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase , streaming=lowerCAmelCase , num_proc=lowerCAmelCase , **lowerCAmelCase , )
        SCREAMING_SNAKE_CASE__: str= field
        # Normalize a bare path/list into a {split: paths} mapping.
        SCREAMING_SNAKE_CASE__: Optional[int]= path_or_paths if isinstance(lowerCAmelCase , lowerCAmelCase ) else {self.split: path_or_paths}
        SCREAMING_SNAKE_CASE__: Optional[Any]= Json(
            cache_dir=lowerCAmelCase , data_files=lowerCAmelCase , features=lowerCAmelCase , field=lowerCAmelCase , **lowerCAmelCase , )

    def UpperCamelCase_ ( self ) -> Dict:
        # Build iterable dataset
        if self.streaming:
            SCREAMING_SNAKE_CASE__: Dict= self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            SCREAMING_SNAKE_CASE__: str= None
            SCREAMING_SNAKE_CASE__: Any= None
            SCREAMING_SNAKE_CASE__: Optional[Any]= None
            SCREAMING_SNAKE_CASE__: Any= None
            self.builder.download_and_prepare(
                download_config=lowerCAmelCase , download_mode=lowerCAmelCase , verification_mode=lowerCAmelCase , base_path=lowerCAmelCase , num_proc=self.num_proc , )
            SCREAMING_SNAKE_CASE__: Optional[int]= self.builder.as_dataset(
                split=self.split , verification_mode=lowerCAmelCase , in_memory=self.keep_in_memory )
        return dataset
class _lowerCamelCase :
    """Writes a Dataset to JSON (pandas ``to_json`` orients, JSON-lines by default),
    optionally batching across a multiprocessing pool.

    NOTE(review): locals are assigned to the placeholder ``SCREAMING_SNAKE_CASE__``
    while later statements read the intended names (``orient``, ``compression``,
    ``written``, ``json_str`` ...); code left byte-identical and only documented.
    """

    def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> Tuple:
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'num_proc {num_proc} must be an integer > 0.' )
        SCREAMING_SNAKE_CASE__: List[str]= dataset
        SCREAMING_SNAKE_CASE__: int= path_or_buf
        SCREAMING_SNAKE_CASE__: Dict= batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        SCREAMING_SNAKE_CASE__: List[Any]= num_proc
        SCREAMING_SNAKE_CASE__: int= '''utf-8'''
        SCREAMING_SNAKE_CASE__: Union[str, Any]= to_json_kwargs

    def UpperCamelCase_ ( self ) -> int:
        # Pop writer-level options out of the pandas kwargs before delegating to _write.
        SCREAMING_SNAKE_CASE__: str= self.to_json_kwargs.pop('''path_or_buf''' , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.to_json_kwargs.pop('''orient''' , '''records''' )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
        SCREAMING_SNAKE_CASE__: int= self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
        SCREAMING_SNAKE_CASE__: Dict= self.to_json_kwargs.pop('''compression''' , lowerCAmelCase )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'`datasets` currently does not support {compression} compression' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            # Path-like destination: let fsspec handle compression and remote filesystems.
            with fsspec.open(self.path_or_buf , '''wb''' , compression=lowerCAmelCase ) as buffer:
                SCREAMING_SNAKE_CASE__: Optional[Any]= self._write(file_obj=lowerCAmelCase , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    ''' was passed. Please provide a local path instead.''' )
            SCREAMING_SNAKE_CASE__: str= self._write(
                file_obj=self.path_or_buf , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **self.to_json_kwargs )
        return written

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
        # Serialize one batch of rows to encoded JSON bytes (worker-safe: takes a tuple).
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Union[str, Any]= args
        SCREAMING_SNAKE_CASE__: Union[str, Any]= query_table(
            table=self.dataset.data , key=slice(lowerCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
        SCREAMING_SNAKE_CASE__: List[str]= batch.to_pandas().to_json(
            path_or_buf=lowerCAmelCase , orient=lowerCAmelCase , lines=lowerCAmelCase , index=lowerCAmelCase , **lowerCAmelCase )
        if not json_str.endswith('''\n''' ):
            json_str += "\n"
        return json_str.encode(self.encoding )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase , ) -> int:
        # Stream batches to file_obj, sequentially or via a process pool; returns bytes written.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                SCREAMING_SNAKE_CASE__: List[str]= self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowerCAmelCase , lowerCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                    written += file_obj.write(lowerCAmelCase )
        return written
| 64 | from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase_ : List[Any] = logging.get_logger(__name__)
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
    """PyTorch benchmark arguments.

    Extends the shared benchmark-argument dataclass (imported above as
    ``BenchmarkArguments``) with PyTorch-specific flags and translates the
    deprecated negated CLI flags (``no_*``) into their positive counterparts.

    NOTE(review): the original had every field bound to ``__a`` (each
    assignment overwriting the previous) and unbound locals in ``__init__``
    and ``_setup_devices``; names are restored from the references inside the
    methods (``self.torchscript``, ``self.fp16_opt_level``,
    ``self._setup_devices``).  ``self.cuda`` / ``self.tpu`` are presumably
    provided by the base class or by the deprecated-flag translation —
    confirm against the base dataclass.
    """

    # Deprecated negated flags; ``__init__`` converts each ``no_x=...`` kwarg
    # into the positive attribute ``x`` with the boolean inverted.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__( self , **kwargs ) -> None:
        """Translate deprecated ``no_*`` kwargs, pop PyTorch-only kwargs, then
        delegate the remainder to the base dataclass ``__init__``."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the leading "no_"
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                # NOTE(review): ``kwargs[positive_arg]`` can KeyError because only the
                # deprecated key was popped above — confirm the intended message.
                logger.warning(
                    f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}' )
        self.torchscript = kwargs.pop('''torchscript''' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop('''fp16_opt_level''' , self.fp16_opt_level )
        super().__init__(**kwargs )

    torchscript: bool = field(default=False , metadata={"help": "Trace the models using torchscript"} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
    fp16_opt_level: str = field(
        default="O1" , metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        } , )

    @cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        """Resolve the torch device and GPU count once (cached)."""
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            device = torch.device('''cpu''' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu( self ) -> bool:
        """Whether benchmarking should run on a TPU."""
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx( self ) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device( self ) -> "torch.device":
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]

    @property
    def n_gpu( self ) -> int:
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]

    @property
    def is_gpu( self ) -> bool:
        return self.n_gpu > 0
| 64 | 1 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _lowerCamelCase :
    """Helper that builds ViT-Hybrid configs and dummy inputs for the model tests below.

    NOTE(review): throughout this class the assignment targets were machine-mangled
    to the single throwaway name ``SCREAMING_SNAKE_CASE__`` while later code reads
    the original descriptive names (e.g. ``num_patches``, ``config_and_inputs``),
    so the code raises NameError as written; left byte-identical pending a proper
    restoration of the bindings.
    """

    def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=64 , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=10 , lowerCAmelCase=0.02 , lowerCAmelCase=[1, 16, 4, 4] , lowerCAmelCase=None , ) -> List[str]:
        # NOTE(review): the parameter list repeats ``lowerCAmelCase`` (a SyntaxError);
        # defaults suggest (parent, batch_size, image_size, patch_size, num_channels, ...).
        SCREAMING_SNAKE_CASE__: str= parent
        SCREAMING_SNAKE_CASE__: List[Any]= batch_size
        SCREAMING_SNAKE_CASE__: Tuple= image_size
        SCREAMING_SNAKE_CASE__: Optional[Any]= patch_size
        SCREAMING_SNAKE_CASE__: Optional[Any]= num_channels
        SCREAMING_SNAKE_CASE__: Any= is_training
        SCREAMING_SNAKE_CASE__: int= use_labels
        SCREAMING_SNAKE_CASE__: Tuple= hidden_size
        SCREAMING_SNAKE_CASE__: Optional[int]= num_hidden_layers
        SCREAMING_SNAKE_CASE__: List[Any]= num_attention_heads
        SCREAMING_SNAKE_CASE__: Optional[Any]= intermediate_size
        SCREAMING_SNAKE_CASE__: List[str]= hidden_act
        SCREAMING_SNAKE_CASE__: int= hidden_dropout_prob
        SCREAMING_SNAKE_CASE__: Optional[Any]= attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__: Tuple= type_sequence_label_size
        SCREAMING_SNAKE_CASE__: List[Any]= initializer_range
        SCREAMING_SNAKE_CASE__: List[Any]= scope
        SCREAMING_SNAKE_CASE__: List[Any]= backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        SCREAMING_SNAKE_CASE__: int= (self.image_size // 32) ** 2
        SCREAMING_SNAKE_CASE__: int= num_patches + 1

    def UpperCamelCase_ ( self ) -> Optional[int]:
        # Builds pixel_values (and labels when use_labels) plus a config.
        SCREAMING_SNAKE_CASE__: List[str]= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= None
        if self.use_labels:
            SCREAMING_SNAKE_CASE__: List[Any]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
        SCREAMING_SNAKE_CASE__: Tuple= self.get_config()
        return config, pixel_values, labels

    def UpperCamelCase_ ( self ) -> List[Any]:
        # Builds a ViTHybridConfig with a small BiT backbone configuration.
        SCREAMING_SNAKE_CASE__: int= {
            '''global_padding''': '''same''',
            '''layer_type''': '''bottleneck''',
            '''depths''': [3, 4, 9],
            '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
            '''embedding_dynamic_padding''': True,
            '''hidden_sizes''': [4, 8, 16, 32],
            '''num_groups''': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowerCAmelCase , )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
        # Checks the base model's last_hidden_state shape.
        SCREAMING_SNAKE_CASE__: int= ViTHybridModel(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: int= model(lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> str:
        # Checks the classification head's logits shape.
        SCREAMING_SNAKE_CASE__: Tuple= self.type_sequence_label_size
        SCREAMING_SNAKE_CASE__: List[Any]= ViTHybridForImageClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: str= model(lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def UpperCamelCase_ ( self ) -> List[Any]:
        # Packs (config, {"pixel_values": ...}) for the common test mixin.
        SCREAMING_SNAKE_CASE__: int= self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: int= config_and_inputs
        SCREAMING_SNAKE_CASE__: List[str]= {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Model-level tests for ViT-Hybrid (common mixin + pipeline mixin).

    NOTE(review): the class attributes below were all mangled to ``__a`` (each
    assignment overwrites the previous) and the test methods to
    ``UpperCamelCase_``; several locals are bound to ``SCREAMING_SNAKE_CASE__``
    while later lines read the original names.  Left byte-identical.
    """

    __a = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    __a = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    __a = False
    __a = False
    __a = False

    def UpperCamelCase_ ( self ) -> int:
        # Builds the shared model tester and config tester fixtures.
        SCREAMING_SNAKE_CASE__: Tuple= ViTHybridModelTester(self )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )

    def UpperCamelCase_ ( self ) -> List[Any]:
        # Runs the shared configuration sanity checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        pass

    def UpperCamelCase_ ( self ) -> Tuple:
        # Input embeddings are a Module; there are no output embeddings.
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__: List[Any]= model_class(lowerCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            SCREAMING_SNAKE_CASE__: Optional[int]= model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # forward() must accept pixel_values as its first argument.
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__: int= model_class(lowerCAmelCase )
            SCREAMING_SNAKE_CASE__: Any= inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE__: int= [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE__: int= ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> Optional[int]:
        # Exercises the base-model shape check.
        SCREAMING_SNAKE_CASE__: Tuple= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> str:
        # Exercises the classification-head shape check.
        SCREAMING_SNAKE_CASE__: int= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> str:
        # With zero-init config, all trainable params (except the backbone) must
        # initialize to 0.0 or 1.0.
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__: Optional[int]= _config_zero_init(lowerCAmelCase )
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__: Optional[int]= model_class(config=lowerCAmelCase )
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    SCREAMING_SNAKE_CASE__: Optional[int]= [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )

    @slow
    def UpperCamelCase_ ( self ) -> Tuple:
        # Smoke-test loading the first pretrained checkpoint.
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE__: Optional[Any]= ViTHybridModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
def A__ ( ):
    """Load the COCO cats fixture image used by the integration tests below.

    NOTE(review): the original bound the opened image to a throwaway name and
    returned the unbound ``image``; the binding below restores it.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
    """Integration tests: run real ViT-Hybrid checkpoints on the COCO fixture image.

    NOTE(review): locals are bound to ``SCREAMING_SNAKE_CASE__`` while later
    lines read the original names (``model``, ``inputs``, ``outputs``, ...),
    so the code raises NameError as written; left byte-identical.
    """

    @cached_property
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Image processor for the first pretrained checkpoint (None without vision deps).
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # End-to-end classification: checks logits shape and a known slice.
        SCREAMING_SNAKE_CASE__: Tuple= ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.default_image_processor
        SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_img()
        SCREAMING_SNAKE_CASE__: str= image_processor(images=lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__: int= model(**lowerCAmelCase )
        # verify the logits
        SCREAMING_SNAKE_CASE__: List[str]= torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= torch.tensor([-1.9090, -0.4993, -0.2389] ).to(lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )

    @slow
    @require_accelerate
    def UpperCamelCase_ ( self ) -> int:
        # Same model loaded with accelerate's device_map="auto"; checks the top-1 label.
        SCREAMING_SNAKE_CASE__: int= ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
        SCREAMING_SNAKE_CASE__: Dict= ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
        SCREAMING_SNAKE_CASE__: str= prepare_img()
        SCREAMING_SNAKE_CASE__: Optional[Any]= image_processor(images=lowerCAmelCase , return_tensors='''pt''' )
        SCREAMING_SNAKE_CASE__: Optional[Any]= model(**lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= outputs.logits
        # model predicts one of the 1000 ImageNet classes
        SCREAMING_SNAKE_CASE__: Optional[int]= logits.argmax(-1 ).item()
        self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
| 64 | import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0.9 , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , ) -> str:
SCREAMING_SNAKE_CASE__: List[str]= size if size is not None else {'''shortest_edge''': 30}
SCREAMING_SNAKE_CASE__: Any= crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
SCREAMING_SNAKE_CASE__: Dict= parent
SCREAMING_SNAKE_CASE__: List[str]= batch_size
SCREAMING_SNAKE_CASE__: int= num_channels
SCREAMING_SNAKE_CASE__: int= min_resolution
SCREAMING_SNAKE_CASE__: List[Any]= max_resolution
SCREAMING_SNAKE_CASE__: List[str]= do_resize_and_center_crop
SCREAMING_SNAKE_CASE__: Union[str, Any]= size
SCREAMING_SNAKE_CASE__: Dict= crop_pct
SCREAMING_SNAKE_CASE__: Optional[int]= crop_size
SCREAMING_SNAKE_CASE__: Dict= do_normalize
SCREAMING_SNAKE_CASE__: List[str]= image_mean
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_std
def UpperCamelCase_ ( self ) -> Tuple:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """PoolFormer image-processor tests (PIL, numpy, and torch input paths).

    NOTE(review): locals are bound to ``SCREAMING_SNAKE_CASE__`` while later
    lines read the original names (``image_processor``, ``image_inputs``,
    ``encoded_images``), the fixture methods are all mangled to
    ``UpperCamelCase_``, and ``self.image_processor_tester`` /
    ``self.image_processor_dict`` are never actually bound under those names.
    Left byte-identical.
    """

    __a = PoolFormerImageProcessor if is_vision_available() else None

    def UpperCamelCase_ ( self ) -> List[Any]:
        # Builds the shared tester fixture.
        SCREAMING_SNAKE_CASE__: Any= PoolFormerImageProcessingTester(self )

    @property
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Kwargs dict used to instantiate the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_ ( self ) -> Dict:
        # The processor must expose all of its configuration attributes.
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase , '''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''crop_pct''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )

    def UpperCamelCase_ ( self ) -> Tuple:
        # from_dict must honour defaults and explicit size/crop_size overrides.
        SCREAMING_SNAKE_CASE__: Any= self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
        self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
        SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )

    def UpperCamelCase_ ( self ) -> Tuple:
        pass

    def UpperCamelCase_ ( self ) -> Optional[int]:
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Dict= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCamelCase_ ( self ) -> Dict:
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: List[Any]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCamelCase_ ( self ) -> int:
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: List[Any]= self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE__: Any= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 64 | 1 |
from __future__ import annotations
class _lowerCamelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> Any:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Union[str, Any]= text, pattern
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= len(lowerCAmelCase ), len(lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def UpperCamelCase_ ( self ) -> list[int]:
# searches pattern in text and returns index positions
SCREAMING_SNAKE_CASE__: Union[str, Any]= []
for i in range(self.textLen - self.patLen + 1 ):
SCREAMING_SNAKE_CASE__: Optional[Any]= self.mismatch_in_text(lowerCAmelCase )
if mismatch_index == -1:
positions.append(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.match_in_pattern(self.text[mismatch_index] )
SCREAMING_SNAKE_CASE__: int= (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
# Demo: search for a pattern with the Boyer-Moore matcher defined above.
# NOTE(review): the bindings were mangled - every value below is assigned to
# ``lowercase_`` while the later code reads ``text``/``pattern``/``bms``/
# ``positions``, and ``BoyerMooreSearch`` is not bound in this module (the
# class above is ``_lowerCamelCase``). Left byte-identical.
lowercase_ : List[str] = 'ABAABA'
lowercase_ : Any = 'AB'
lowercase_ : Optional[Any] = BoyerMooreSearch(text, pattern)
lowercase_ : Union[str, Any] = bms.bad_character_heuristic()
if len(positions) == 0:
    print('No match found')
else:
    print('Pattern found in following positions: ')
    print(positions)
| 64 | import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ : Tuple = 3
def A__ ( snake_case_ : int ):
    """Return a randomly chosen primitive-root candidate modulo the prime ``snake_case_``.

    Keeps drawing ``g`` in ``[3, snake_case_)`` until it passes the two quick
    order checks this module uses (``g**2 % p != 1`` and ``g**p % p != 1``).

    NOTE(review): the original bound the draw to a throwaway name and returned
    the unbound ``g``; the binding below restores it.
    """
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , snake_case_ )
        if pow(g , 2 , snake_case_ ) == 1:
            continue
        if pow(g , snake_case_ , snake_case_ ) == 1:
            continue
        return g
def A__ ( snake_case_ : int ):
    """Generate an ElGamal key pair for a ``snake_case_``-bit prime.

    Returns ``(public_key, private_key)`` where ``public_key`` is
    ``(key_size, e_1, e_2, p)`` and ``private_key`` is ``(key_size, d)``.

    NOTE(review): every definition in this module was mangled to the name
    ``A__``, so the ``primitive_root`` call below is unresolved as the file
    stands (the intended target is the function above); confirm the name
    restoration module-wide.
    """
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(snake_case_ )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (snake_case_, e_1, e_2, p)
    private_key = (snake_case_, d)
    return public_key, private_key
def A__ ( name : str , key_size : int ):
    """Generate an ElGamal key pair and write it to ``{name}_pubkey.txt`` /
    ``{name}_privkey.txt``, refusing to overwrite existing files.

    NOTE(review): the original signature repeated ``snake_case_`` for both
    parameters (a SyntaxError); ``name`` is grounded by the f-strings below.
    ``generate_key`` is unresolved as the file stands (the generator above is
    bound to ``A__``) - confirm the name restoration module-wide.
    """
    if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
        print('''\nWARNING:''' )
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(F'\nWriting public key to file {name}_pubkey.txt...' )
    with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
        fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
    print(F'Writing private key to file {name}_privkey.txt...' )
    with open(F'{name}_privkey.txt' , '''w''' ) as fo:
        fo.write(F'{private_key[0]},{private_key[1]}' )
def A__ ( ):
    """Entry point: generate the ElGamal key pair files.

    NOTE(review): ``make_key_files`` is not bound under that name in this
    module (every definition was mangled to ``A__``), so this call raises
    ``NameError`` at runtime as written; left byte-identical.
    """
    print('''Making key files...''' )
    make_key_files('''elgamal''' , 2_048 )
    print('''Key files generation successful''' )
if __name__ == "__main__":
    # NOTE(review): every function in this module was mangled to the name
    # ``A__``; at this point ``A__`` resolves to the last definition above
    # (the entry point that the guard originally invoked as ``main``), so it
    # is called under that name. Restore proper names module-wide when possible.
    A__()
| 64 | 1 |
import string
from math import logaa
def A__ ( snake_case_ : str , snake_case_ : str ):
SCREAMING_SNAKE_CASE__: Optional[int]= document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
SCREAMING_SNAKE_CASE__: Tuple= document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def A__ ( snake_case_ : str , snake_case_ : str ):
SCREAMING_SNAKE_CASE__: str= corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
SCREAMING_SNAKE_CASE__: Optional[Any]= corpus_without_punctuation.split('''\n''' )
SCREAMING_SNAKE_CASE__: List[Any]= term.lower()
return (len([doc for doc in docs if term in doc] ), len(snake_case_ ))
def A__ ( snake_case_ : int , snake_case_ : int , snake_case_ : Tuple=False ):
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def A__ ( snake_case_ : int , snake_case_ : int ):
return round(tf * idf , 3 )
| 64 | from math import factorial
def A__ ( n : int , k : int ):
    """Return the binomial coefficient C(n, k) - the number of ways to choose
    ``k`` items from ``n`` without regard to order.

    Raises ``ValueError`` when ``n < k`` or ``k < 0``.

    NOTE(review): the original signature repeated ``snake_case_`` for both
    parameters (a SyntaxError); ``n`` and ``k`` are grounded by the references
    in the body.
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
    # NOTE(review): ``combinations`` is not bound under that name in this module
    # (the function above was mangled to ``A__``), so these f-strings raise
    # ``NameError`` when evaluated; left byte-identical.
    print(
        'The number of five-card hands possible from a standard',
        f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
    )
    print(
        'If a class of 40 students must be arranged into groups of',
        f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
        'to arrange them.\n',
    )
    print(
        'If 10 teams are competing in a Formula One race, there',
        f'''are {combinations(1_0, 3)} ways that first, second and''',
        'third place can be awarded.',
    )
| 64 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def A__ ( ):
    """CLI entry point: parse TensorFlow benchmark arguments and run the benchmark.

    When argument parsing fails because deprecated negated flags were used, a
    more helpful error message is assembled before re-raising.

    NOTE(review): the original read the unbound name ``snake_case_`` in several
    places; bindings are restored from the surrounding references
    (``parser``, ``benchmark_args``, ``benchmark``, ``arg``).
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
        begin_error_msg = ''' '''.join(str(e ).split(''' ''' )[:-1] )
        full_error_msg = ''''''
        # NOTE(review): eval() on text recovered from the exception message -
        # acceptable here only because the message is produced locally by
        # argparse, not by untrusted input.
        depreciated_args = eval(str(e ).split(''' ''' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()


if __name__ == "__main__":
    # The guard originally called the undefined name ``main``; the entry point
    # in this module is bound to ``A__``.
    A__()
| 64 | import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase_ : Dict = random.Random()
if is_torch_available():
import torch
def A__ ( shape , scale=1.0 , rng=None , name=None ):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in
    ``[0, scale)``.

    Falls back to the module-level ``global_rng`` when no ``rng`` is given.
    ``name`` is accepted for API compatibility and unused.

    NOTE(review): the original signature repeated ``snake_case_`` (a
    SyntaxError); parameter names are restored from the references in the body
    (``rng``, ``shape``, ``scale``).
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class _lowerCamelCase ( unittest.TestCase ):
    """Fixture holder for the ASTFeatureExtractor tests below.

    NOTE(review): both methods are mangled to ``UpperCamelCase_`` (the second
    definition shadows the first, hiding the kwargs-dict builder), the
    ``__init__`` assignments bind a throwaway local instead of ``self.*``
    attributes, and ``floats_list`` is not bound under that name in this
    module (the helper above is ``A__``). Left byte-identical.
    """

    def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=1 , lowerCAmelCase=0.0 , lowerCAmelCase=16000 , lowerCAmelCase=True , lowerCAmelCase=True , ) -> List[str]:
        # NOTE(review): parameter list repeats ``lowerCAmelCase`` (a SyntaxError);
        # defaults suggest (parent, batch_size, min_seq_length, max_seq_length,
        # feature_size, padding_value, sampling_rate, return_attention_mask, do_normalize).
        SCREAMING_SNAKE_CASE__: Optional[Any]= parent
        SCREAMING_SNAKE_CASE__: Dict= batch_size
        SCREAMING_SNAKE_CASE__: Optional[int]= min_seq_length
        SCREAMING_SNAKE_CASE__: Dict= max_seq_length
        SCREAMING_SNAKE_CASE__: Optional[Any]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        SCREAMING_SNAKE_CASE__: Dict= feature_size
        SCREAMING_SNAKE_CASE__: str= padding_value
        SCREAMING_SNAKE_CASE__: Dict= sampling_rate
        SCREAMING_SNAKE_CASE__: List[str]= return_attention_mask
        SCREAMING_SNAKE_CASE__: str= do_normalize

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Kwargs dict used to build the feature extractor under test.
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Dict:
        # Builds speech inputs of equal or increasing lengths, optionally as numpy arrays.
        def _flatten(lowerCAmelCase ):
            return list(itertools.chain(*lowerCAmelCase ) )

        if equal_length:
            SCREAMING_SNAKE_CASE__: int= floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            SCREAMING_SNAKE_CASE__: int= [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """ASTFeatureExtractor tests (call paths, dtype handling, integration values).

    NOTE(review): locals are bound to ``SCREAMING_SNAKE_CASE__`` while later
    lines read the original names (``feat_extract``, ``speech_inputs``, ...),
    and the test methods are all mangled to ``UpperCamelCase_``.  Left
    byte-identical.
    """

    __a = ASTFeatureExtractor

    def UpperCamelCase_ ( self ) -> int:
        # Builds the shared tester fixture.
        SCREAMING_SNAKE_CASE__: List[Any]= ASTFeatureExtractionTester(self )

    def UpperCamelCase_ ( self ) -> Any:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        SCREAMING_SNAKE_CASE__: Optional[int]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        SCREAMING_SNAKE_CASE__: Dict= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test batched
        SCREAMING_SNAKE_CASE__: Tuple= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
        SCREAMING_SNAKE_CASE__: List[Any]= np.asarray(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )

    @require_torch
    def UpperCamelCase_ ( self ) -> Dict:
        # float64 python/numpy inputs must come back float32 from the pt path.
        import torch

        SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        SCREAMING_SNAKE_CASE__: List[str]= np.random.rand(100 ).astype(np.floataa )
        SCREAMING_SNAKE_CASE__: Optional[Any]= np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
        # Loads the first ``lowerCAmelCase`` librispeech samples as raw arrays.
        from datasets import load_dataset

        SCREAMING_SNAKE_CASE__: Optional[int]= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        SCREAMING_SNAKE_CASE__: Dict= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]

    @require_torch
    def UpperCamelCase_ ( self ) -> str:
        # Integration check against known feature values for one sample.
        # fmt: off
        SCREAMING_SNAKE_CASE__: str= torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 )
        SCREAMING_SNAKE_CASE__: Tuple= ASTFeatureExtractor()
        SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
| 64 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase_ : List[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class _lowerCamelCase(unittest.TestCase):
    """Pipeline tests for the zero-shot-classification task.

    Fixes vs. the previous revision: class attributes are bound to the
    `model_mapping` / `tf_model_mapping` names the filtering code reads,
    duplicate parameter names (a SyntaxError) are replaced with distinct
    ones, every pipeline result is bound to the name the assertions read,
    and each method gets a unique name so later defs no longer clobber
    earlier ones.
    """

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    # Drop model types whose inputs differ from the usual text models.
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def run_pipeline_test(self, classifier, examples):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        """Check `entailment_id` resolution for several label2id layouts, then restore the config."""
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)

    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data.",
            candidate_labels=["machine learning", "statistics", "translation", "vision"],
            multi_label=True,
        )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.817, 0.713, 0.018, 0.018],
            },
        )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> public names it exports.
# The previous revision bound this dict (and every optional extension of it)
# to throwaway names, so `_import_structure` was undefined at the bottom and
# the lazy module was never installed into `sys.modules`.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for type checkers only; names match the string lists above
    # (the previous revision imported nonexistent `SpeechaText*` names).
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase(SchedulerCommonTest):
    """Scheduler tests for `IPNDMScheduler`.

    Fixes vs. the previous revision: the base class is the imported
    `SchedulerCommonTest` (not the undefined `UpperCamelCase_`), every local
    is bound to the name later statements read, and each method has its own
    name so definitions no longer overwrite each other.
    """

    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config, overridable via kwargs."""
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and verify both copies step identically."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Save/reload round-trips are already covered by check_over_configs /
        # check_over_forward above.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Same round-trip check as check_over_configs, using the default config."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run two full denoising passes and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def parse_args():
    """Parse command-line options for the image-generation script.

    Returns the populated `argparse.Namespace`. (Named `parse_args` because
    the script below calls it under that name; the argument specs use the
    concrete types/flags the previous revision lost to undefined names.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    """Paste exactly `rows * cols` equally-sized PIL images into one grid image.

    Raises ValueError when the image count does not match the grid shape.
    """
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    # All images are assumed to share the first image's size — TODO confirm.
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        # Fill row-major: column = i % cols, row = i // cols.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run the diffusion pipeline and tile the outputs into a square-ish grid.

    Returns (grid_image, list_of_images). Keyword names match the call site
    below (`prompt`, `num_images_per_prompt`, `seed`).
    """
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)

# Disable the safety checker: pass images through unchanged, flag nothing.
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # A neural-compressor checkpoint exists: load the quantized UNet.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))

pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))

dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 64 | 1 |
def A__(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound (m/s) in a fluid via the Newton-Laplace
    formula c = sqrt(K / rho), given density rho (kg/m^3) and bulk modulus K (Pa).

    Raises ValueError for non-positive density or bulk modulus. (The previous
    revision declared `snake_case_` twice as the parameter name — a
    SyntaxError — while the body read `density` and `bulk_modulus`.)
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
from __future__ import annotations
from collections import deque
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: list[dict]= []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCAmelCase )
self.set_fail_transitions()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: str= 0
for character in keyword:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= next_state
self.adlist[current_state]["output"].append(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> None:
SCREAMING_SNAKE_CASE__: deque= deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= 0
while q:
SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
lowerCAmelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
SCREAMING_SNAKE_CASE__: str= (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
for i in range(len(lowerCAmelCase ) ):
while (
self.find_next_state(lowerCAmelCase , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__: List[Any]= 0
else:
SCREAMING_SNAKE_CASE__: Dict= next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__: Optional[Any]= []
result[key].append(i - len(lowerCAmelCase ) + 1 )
return result
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 64 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowerCamelCase :
    # Immutable state container for the Flax DDPM scheduler.
    # NOTE(review): the first three fields all bind the same class attribute
    # name `__a`, so only the last assignment survives — the original field
    # names (presumably common / init_noise_sigma / timesteps, judging by the
    # classmethod below) were lost in automated renaming; restore them.
    __a = 42
    # setable values
    __a = 42
    __a = 42
    __a = None
    @classmethod
    def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
        # Factory: build the state from its three components.
        # NOTE(review): all three parameters share the name `lowerCAmelCase`,
        # which is a SyntaxError — the distinct original names were lost.
        return cls(common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase )
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
    # Output container for one scheduler step.
    # NOTE(review): the base class `UpperCamelCase_` is undefined at module
    # scope — presumably FlaxSchedulerOutput before renaming; the single
    # `__a = 42` field likewise lost its original name (likely `state`).
    __a = 42
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ):
    # Flax port of the DDPM noise scheduler (Ho et al. 2020,
    # https://arxiv.org/pdf/2006.11239.pdf). All mutable state lives in an
    # external DDPMSchedulerState value rather than on the instance.
    # NOTE(review): automated renaming damaged this class — base classes are
    # the undefined name `UpperCamelCase_` (presumably FlaxSchedulerMixin and
    # ConfigMixin), every method is named `UpperCamelCase_` (later defs shadow
    # earlier ones), and assignments bind the throwaway local
    # `SCREAMING_SNAKE_CASE__` while subsequent lines read descriptive names.
    __a = [e.name for e in FlaxKarrasDiffusionSchedulers]
    __a = 42
    @property
    def UpperCamelCase_ ( self ) -> List[Any]:
        # has_state: this scheduler keeps its state externally.
        return True
    @register_to_config
    def __init__( self , lowerCAmelCase = 1000 , lowerCAmelCase = 0.0001 , lowerCAmelCase = 0.02 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "fixed_small" , lowerCAmelCase = True , lowerCAmelCase = "epsilon" , lowerCAmelCase = jnp.floataa , ) -> Optional[int]:
        # NOTE(review): all parameters share the name `lowerCAmelCase` (a
        # SyntaxError) and `dtype` below is read but never bound; the original
        # presumably stored `self.dtype = dtype` — confirm against upstream.
        SCREAMING_SNAKE_CASE__: Optional[int]= dtype
    def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> DDPMSchedulerState:
        # create_state: build the initial immutable scheduler state.
        if common is None:
            SCREAMING_SNAKE_CASE__: Optional[Any]= CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        SCREAMING_SNAKE_CASE__: Dict= jnp.array(1.0 , dtype=self.dtype )
        SCREAMING_SNAKE_CASE__: int= jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase , )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None ) -> jnp.ndarray:
        # scale_model_input: DDPM applies no input scaling; identity.
        return sample
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = () ) -> DDPMSchedulerState:
        # set_timesteps: pick `num_inference_steps` evenly spaced train steps.
        SCREAMING_SNAKE_CASE__: str= self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        SCREAMING_SNAKE_CASE__: str= (jnp.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase , )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[str]:
        # _get_variance: posterior variance for timestep t per the configured
        # variance_type (formulas (6)/(7) of the DDPM paper).
        SCREAMING_SNAKE_CASE__: Tuple= state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE__: int= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        SCREAMING_SNAKE_CASE__: int= (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            SCREAMING_SNAKE_CASE__: Dict= jnp.clip(lowerCAmelCase , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            SCREAMING_SNAKE_CASE__: str= jnp.log(jnp.clip(lowerCAmelCase , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            SCREAMING_SNAKE_CASE__: Union[str, Any]= state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            SCREAMING_SNAKE_CASE__: Optional[Any]= jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate in log space between min (posterior) and max (beta).
            SCREAMING_SNAKE_CASE__: List[Any]= variance
            SCREAMING_SNAKE_CASE__: Any= state.common.betas[t]
            SCREAMING_SNAKE_CASE__: List[Any]= (predicted_variance + 1) / 2
            SCREAMING_SNAKE_CASE__: Optional[Any]= frac * max_log + (1 - frac) * min_log
        return variance
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        # step: one reverse-diffusion step x_t -> x_{t-1}.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= timestep
        if key is None:
            SCREAMING_SNAKE_CASE__: Optional[Any]= jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            # Model predicts both the noise and a variance estimate: split them.
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= jnp.split(lowerCAmelCase , sample.shape[1] , axis=1 )
        else:
            SCREAMING_SNAKE_CASE__: Any= None
        # 1. compute alphas, betas
        SCREAMING_SNAKE_CASE__: List[Any]= state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE__: Optional[int]= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        SCREAMING_SNAKE_CASE__: Optional[int]= 1 - alpha_prod_t
        SCREAMING_SNAKE_CASE__: str= 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            SCREAMING_SNAKE_CASE__: Dict= (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            SCREAMING_SNAKE_CASE__: str= model_output
        elif self.config.prediction_type == "v_prediction":
            SCREAMING_SNAKE_CASE__: Tuple= (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            SCREAMING_SNAKE_CASE__: Any= jnp.clip(lowerCAmelCase , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE__: int= (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        SCREAMING_SNAKE_CASE__: Any= state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE__: Dict= pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            # Sample sigma * z with a key derived from the caller's PRNG key.
            SCREAMING_SNAKE_CASE__: int= jax.random.split(lowerCAmelCase , num=1 )
            SCREAMING_SNAKE_CASE__: str= jax.random.normal(lowerCAmelCase , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(lowerCAmelCase , lowerCAmelCase , predicted_variance=lowerCAmelCase ) ** 0.5) * noise
        SCREAMING_SNAKE_CASE__: Union[str, Any]= jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        SCREAMING_SNAKE_CASE__: Optional[int]= pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase , state=lowerCAmelCase )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
        # add_noise: delegate to the shared forward-diffusion helper.
        return add_noise_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
        # get_velocity: delegate to the shared v-prediction helper.
        return get_velocity_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
    def __len__( self ) -> Tuple:
        # Length is the number of training timesteps.
        return self.config.num_train_timesteps
| 64 | import numpy as np
def A__ ( f , ya , xa , x_end , h ):
    """Integrate the ODE y' = f(x, y) with the classic 4th-order Runge-Kutta method.

    Args:
        f: right-hand side of the ODE, called as ``f(x, y)``.
        ya: initial value ``y(xa)``.
        xa: start of the integration interval.
        x_end: end of the integration interval.
        h: step size.

    Returns:
        numpy array ``y`` of length ``n + 1`` with ``y[k] ~ y(xa + k * h)``,
        where ``n = ceil((x_end - xa) / h)``.
    """
    # Number of steps needed to cover [xa, x_end].
    n = int(np.ceil((x_end - xa) / h ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # Four slope samples: start of step, two midpoints, end of step.
        ka = f(x , y[k] )
        kb = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
        kc = f(x + 0.5 * h , y[k] + 0.5 * h * kb )
        kd = f(x + h , y[k] + h * kc )
        # Weighted average of the slopes (1-2-2-1 Simpson-like weights).
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 64 | 1 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
# Module-level logger (transformers' logging wrapper).
# NOTE(review): originally named `logger`, judging by `logger.warning(...)`
# used further below — the rename left those reads dangling.
lowercase_ : int = logging.get_logger(__name__)
def A__ ( snake_case_ : Any , snake_case_ : List[str] ):
    # Extract the warnings matching `targets` from one CI artifact (a zip file,
    # or a plain directory when running from a GitHub Action) and return them
    # as a set of warning strings.
    # NOTE(review): automated renaming damaged this function — both parameters
    # share the name `snake_case_` (a SyntaxError), and reads of
    # `selected_warnings` / `buffer` / `targets` / `warning` / `fp` /
    # `artifact_path` no longer match any assignment; restore the original
    # names from the upstream transformers utility.
    SCREAMING_SNAKE_CASE__: List[str]= set()
    SCREAMING_SNAKE_CASE__: int= []
    def parse_line(snake_case_ : List[str] ):
        # Accumulate indented continuation lines into a buffer; flush the
        # buffer as one warning whenever an unindented line is reached.
        for line in fp:
            if isinstance(snake_case_ , snake_case_ ):
                SCREAMING_SNAKE_CASE__: Dict= line.decode('''UTF-8''' )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(''' ''' ):
                # process a single warning and move it to `selected_warnings`.
                if len(snake_case_ ) > 0:
                    SCREAMING_SNAKE_CASE__: List[str]= '''\n'''.join(snake_case_ )
                    # Only keep the warnings specified in `targets`
                    if any(F': {x}: ' in warning for x in targets ):
                        selected_warnings.add(snake_case_ )
                    buffer.clear()
                continue
            else:
                SCREAMING_SNAKE_CASE__: int= line.strip()
                buffer.append(snake_case_ )
    if from_gh:
        for filename in os.listdir(snake_case_ ):
            SCREAMING_SNAKE_CASE__: Optional[Any]= os.path.join(snake_case_ , snake_case_ )
            if not os.path.isdir(snake_case_ ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(snake_case_ ) as fp:
                    parse_line(snake_case_ )
    else:
        try:
            with zipfile.ZipFile(snake_case_ ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(snake_case_ ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(snake_case_ ) as fp:
                            parse_line(snake_case_ )
        except Exception:
            logger.warning(
                f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
    return selected_warnings
def A__ ( snake_case_ : Dict , snake_case_ : List[str] ):
    # Aggregate matching warnings across every artifact in a directory.
    # NOTE(review): both parameters share the name `snake_case_` (SyntaxError),
    # and `extract_warnings_from_single_artifact` is undefined here — the
    # function above carries that role but was renamed to `A__`; this function
    # itself is called `extract_warnings` by the __main__ block below.
    SCREAMING_SNAKE_CASE__: List[str]= set()
    SCREAMING_SNAKE_CASE__: Tuple= [os.path.join(snake_case_ , snake_case_ ) for p in os.listdir(snake_case_ ) if (p.endswith('''.zip''' ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(snake_case_ , snake_case_ ) )
    return selected_warnings
# CLI entry point: download the artifacts of a GitHub Actions workflow run
# (unless they were already fetched by the workflow itself), extract the
# targeted warning categories, and write them to selected_warnings.json.
# NOTE(review): renaming broke several references in this block — `parser`,
# `args`, `from_gh`, `artifacts` and `selected_warnings` are read but only
# ever assigned to `lowercase_`; `list_str` refers to the comma-splitting
# helper that is now named `A__`; `extract_warnings` refers to the second
# `A__` above. Restore the original names before running.
if __name__ == "__main__":
    def A__ ( snake_case_ : int ):
        # Split a comma-separated CLI value into a list of strings.
        return values.split(''',''' )
    lowercase_ : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    parser.add_argument(
        '--output_dir',
        type=str,
        required=True,
        help='Where to store the downloaded artifacts and other result files.',
    )
    parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    # optional parameters
    parser.add_argument(
        '--targets',
        default='DeprecationWarning,UserWarning,FutureWarning',
        type=list_str,
        help='Comma-separated list of target warning(s) which we want to extract.',
    )
    parser.add_argument(
        '--from_gh',
        action='store_true',
        help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
    )
    lowercase_ : Tuple = parser.parse_args()
    lowercase_ : List[Any] = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)
        # get download links
        lowercase_ : Union[str, Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)
        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print('=' * 8_0)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)
    # extract warnings from artifacts
    lowercase_ : Optional[Any] = extract_warnings(args.output_dir, args.targets)
    lowercase_ : Any = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 64 | import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _lowerCamelCase ( unittest.TestCase ):
    # Unit tests for diffusers' get_activation factory: each test checks the
    # returned module's type and spot-checks f(-100)=0ish, f(-1)!=0, f(0)=0,
    # f(20)=20 for the saturating activations.
    # NOTE(review): all four test methods share the name `UpperCamelCase_`, so
    # only the last definition survives and none are discovered by unittest
    # (names must start with `test_`); reads of `act` also no longer match the
    # `SCREAMING_SNAKE_CASE__` assignment targets, and `torch.floataa` /
    # `lowerCAmelCase` are renaming artifacts (presumably torch.float32 and
    # the activation module) — restore from the upstream test file.
    def UpperCamelCase_ ( self ) -> List[Any]:
        # swish -> nn.SiLU
        SCREAMING_SNAKE_CASE__: Tuple= get_activation('''swish''' )
        self.assertIsInstance(lowerCAmelCase , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
    def UpperCamelCase_ ( self ) -> int:
        # silu -> nn.SiLU
        SCREAMING_SNAKE_CASE__: Optional[Any]= get_activation('''silu''' )
        self.assertIsInstance(lowerCAmelCase , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # mish -> nn.Mish (note the wider -200 saturation probe)
        SCREAMING_SNAKE_CASE__: Optional[int]= get_activation('''mish''' )
        self.assertIsInstance(lowerCAmelCase , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
    def UpperCamelCase_ ( self ) -> int:
        # gelu -> nn.GELU
        SCREAMING_SNAKE_CASE__: Dict= get_activation('''gelu''' )
        self.assertIsInstance(lowerCAmelCase , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 64 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowercase_ : Any = datasets.utils.logging.get_logger(__name__)
@dataclass
class _lowerCamelCase ( datasets.BuilderConfig ):
    # BuilderConfig for the JSON loader.
    # NOTE(review): every field binds the same name `__a`, so only the last
    # survives; by position and defaults they presumably were (confirm against
    # datasets' JsonConfig): features, encoding, encoding_errors, field,
    # use_threads (deprecated), block_size (deprecated), chunksize,
    # newlines_in_values.
    __a = None
    __a = "utf-8"
    __a = None
    __a = None
    __a = True # deprecated
    __a = None # deprecated
    __a = 10 << 20 # 10MB
    __a = None
class _lowerCamelCase ( datasets.ArrowBasedBuilder ):
    # Arrow-based dataset builder that streams JSON / JSON-Lines files into
    # pyarrow Tables, with chunked reading and progressive block-size growth.
    # NOTE(review): automated renaming damaged the body — every method is named
    # `UpperCamelCase_` (later defs shadow earlier ones; they correspond to
    # _info / _split_generators / _cast_table / _generate_tables), and many
    # reads (`data_files`, `files`, `splits`, `dataset`, `keys`, `mapping`,
    # `pa_table`, `batch`, `block_size`, `batch_idx`, ...) no longer match any
    # assignment. Restore names from datasets' packaged json module.
    __a = JsonConfig
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # _info: validate deprecated config knobs and expose the features.
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            SCREAMING_SNAKE_CASE__: Dict= self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
        return datasets.DatasetInfo(features=self.config.features )
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Union[str, Any]:
        # _split_generators: download/extract data files and emit one split
        # generator per configured split (or a single TRAIN split).
        if not self.config.data_files:
            raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' )
        SCREAMING_SNAKE_CASE__: List[Any]= dl_manager.download_and_extract(self.config.data_files )
        if isinstance(lowerCAmelCase , (str, list, tuple) ):
            SCREAMING_SNAKE_CASE__: List[Any]= data_files
            if isinstance(lowerCAmelCase , lowerCAmelCase ):
                SCREAMING_SNAKE_CASE__: Optional[Any]= [files]
            SCREAMING_SNAKE_CASE__: Optional[int]= [dl_manager.iter_files(lowerCAmelCase ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        SCREAMING_SNAKE_CASE__: List[str]= []
        for split_name, files in data_files.items():
            if isinstance(lowerCAmelCase , lowerCAmelCase ):
                SCREAMING_SNAKE_CASE__: Optional[int]= [files]
            SCREAMING_SNAKE_CASE__: int= [dl_manager.iter_files(lowerCAmelCase ) for file in files]
            splits.append(datasets.SplitGenerator(name=lowerCAmelCase , gen_kwargs={'''files''': files} ) )
        return splits
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> pa.Table:
        # _cast_table: align a parsed table with the declared features.
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                SCREAMING_SNAKE_CASE__: Any= self.config.features.arrow_schema.field(lowerCAmelCase ).type
                SCREAMING_SNAKE_CASE__: str= pa_table.append_column(lowerCAmelCase , pa.array([None] * len(lowerCAmelCase ) , type=lowerCAmelCase ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            SCREAMING_SNAKE_CASE__: int= table_cast(lowerCAmelCase , self.config.features.arrow_schema )
        return pa_table
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
        # _generate_tables: yield (key, pa.Table) pairs for every input file.
        for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    SCREAMING_SNAKE_CASE__: str= json.load(lowerCAmelCase )
                # We keep only the field we are interested in
                SCREAMING_SNAKE_CASE__: Any= dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(lowerCAmelCase , (list, tuple) ):
                    SCREAMING_SNAKE_CASE__: int= set().union(*[row.keys() for row in dataset] )
                    SCREAMING_SNAKE_CASE__: Optional[int]= {col: [row.get(lowerCAmelCase ) for row in dataset] for col in keys}
                else:
                    SCREAMING_SNAKE_CASE__: List[str]= dataset
                SCREAMING_SNAKE_CASE__: Optional[Any]= pa.Table.from_pydict(lowerCAmelCase )
                yield file_idx, self._cast_table(lowerCAmelCase )
            # If the file has one json object per line
            else:
                with open(lowerCAmelCase , '''rb''' ) as f:
                    SCREAMING_SNAKE_CASE__: Optional[Any]= 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    SCREAMING_SNAKE_CASE__: Tuple= max(self.config.chunksize // 32 , 16 << 10 )
                    SCREAMING_SNAKE_CASE__: List[Any]= (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        SCREAMING_SNAKE_CASE__: Optional[int]= f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(lowerCAmelCase )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            SCREAMING_SNAKE_CASE__: Dict= batch.decode(self.config.encoding , errors=lowerCAmelCase ).encode('''utf-8''' )
                        try:
                            while True:
                                try:
                                    SCREAMING_SNAKE_CASE__: List[str]= paj.read_json(
                                        io.BytesIO(lowerCAmelCase ) , read_options=paj.ReadOptions(block_size=lowerCAmelCase ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(lowerCAmelCase , pa.ArrowInvalid )
                                        and "straddling" not in str(lowerCAmelCase )
                                        or block_size > len(lowerCAmelCase )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f'Batch of {len(lowerCAmelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    SCREAMING_SNAKE_CASE__: Tuple= json.load(lowerCAmelCase )
                            except json.JSONDecodeError:
                                logger.error(f'Failed to read file \'{file}\' with error {type(lowerCAmelCase )}: {e}' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(lowerCAmelCase , lowerCAmelCase ): # list is the only sequence type supported in JSON
                                try:
                                    SCREAMING_SNAKE_CASE__: str= set().union(*[row.keys() for row in dataset] )
                                    SCREAMING_SNAKE_CASE__: List[str]= {col: [row.get(lowerCAmelCase ) for row in dataset] for col in keys}
                                    SCREAMING_SNAKE_CASE__: Dict= pa.Table.from_pydict(lowerCAmelCase )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f'Failed to read file \'{file}\' with error {type(lowerCAmelCase )}: {e}' )
                                    raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None
                                yield file_idx, self._cast_table(lowerCAmelCase )
                                break
                            else:
                                logger.error(f'Failed to read file \'{file}\' with error {type(lowerCAmelCase )}: {e}' )
                                raise ValueError(
                                    f'Not able to read records in the JSON file at {file}. '
                                    f'You should probably indicate the field of the JSON file containing your records. '
                                    f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '
                                    f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase )
                        batch_idx += 1
| 64 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
# Element type of the segment tree.
# NOTE(review): the TypeVar is bound to the name `lowercase_`, so the name
# `T` is never defined at module scope — any runtime subscript `Generic[T]`
# below would raise NameError; rename this back to `T`.
lowercase_ : Tuple = TypeVar('T')
class _lowerCamelCase ( Generic[T] ):
def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: Any | T= None
SCREAMING_SNAKE_CASE__: int= len(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: list[T]= [any_type for _ in range(self.N )] + arr
SCREAMING_SNAKE_CASE__: List[Any]= fnc
self.build()
def UpperCamelCase_ ( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
p += self.N
SCREAMING_SNAKE_CASE__: Union[str, Any]= v
while p > 1:
SCREAMING_SNAKE_CASE__: Any= p // 2
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> T | None: # noqa: E741
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= l + self.N, r + self.N
SCREAMING_SNAKE_CASE__: T | None= None
while l <= r:
if l % 2 == 1:
SCREAMING_SNAKE_CASE__: str= self.st[l] if res is None else self.fn(lowerCAmelCase , self.st[l] )
if r % 2 == 0:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.st[r] if res is None else self.fn(lowerCAmelCase , self.st[r] )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= (l + 1) // 2, (r - 1) // 2
return res
# Exhaustive self-test: for every sub-range, compare the three segment trees
# (min / max / sum) against a brute-force functools.reduce, before and after
# applying the point updates in `test_updates`.
# NOTE(review): renaming broke this block — `SegmentTree`, `test_array`,
# `test_updates`, `min_segment_tree`, `max_segment_tree`, `sum_segment_tree`
# and `test_all_segments` are read but only ever assigned to `lowercase_` /
# defined as `A__`; the `def A__` body also reads undefined `min_range` /
# `max_range` / `sum_range`, `a` / `b` and loop bounds. Restore names.
if __name__ == "__main__":
    from functools import reduce
    lowercase_ : str = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
    lowercase_ : str = {
        0: 7,
        1: 2,
        2: 6,
        3: -1_4,
        4: 5,
        5: 4,
        6: 7,
        7: -1_0,
        8: 9,
        9: 1_0,
        1_0: 1_2,
        1_1: 1,
    }
    lowercase_ : int = SegmentTree(test_array, min)
    lowercase_ : Optional[int] = SegmentTree(test_array, max)
    lowercase_ : Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
    def A__ ( ):
        # Brute-force check of every inclusive (i, j) range.
        for i in range(len(snake_case_ ) ):
            for j in range(snake_case_ , len(snake_case_ ) ):
                SCREAMING_SNAKE_CASE__: Any= reduce(snake_case_ , test_array[i : j + 1] )
                SCREAMING_SNAKE_CASE__: Optional[Any]= reduce(snake_case_ , test_array[i : j + 1] )
                SCREAMING_SNAKE_CASE__: int= reduce(lambda snake_case_ , snake_case_ : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(snake_case_ , snake_case_ )
                assert max_range == max_segment_tree.query(snake_case_ , snake_case_ )
                assert sum_range == sum_segment_tree.query(snake_case_ , snake_case_ )
    test_all_segments()
    for index, value in test_updates.items():
        lowercase_ : int = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 64 | 1 |
# Package __init__ with a guarded import: when torch and transformers
# (>= 4.25.0) are available, expose the real UnCLIP pipelines; otherwise fall
# back to dummy placeholder objects so importing this subpackage never fails.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dependencies missing: export stand-ins that raise a helpful error on use.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 64 | # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
# Force deterministic torch/cuda behavior so test outputs are reproducible.
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    # Fast (tiny-model) test suite for StableDiffusionControlNetImg2ImgPipeline.
    # NOTE(review): the three mixin base classes are the undefined name
    # `UpperCamelCase_` — presumably PipelineLatentTesterMixin,
    # PipelineKarrasSchedulerTesterMixin and PipelineTesterMixin from the
    # imports above; all methods are likewise named `UpperCamelCase_`, so
    # later definitions shadow earlier ones and unittest discovers no tests.
    __a = StableDiffusionControlNetImgaImgPipeline
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def UpperCamelCase_ ( self ) -> str:
        # get_dummy_components: build a minimal pipeline (tiny UNet, ControlNet,
        # DDIM scheduler, VAE, CLIP text encoder/tokenizer) with fixed seeds.
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: str= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: str= DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[str]= AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[Any]= CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTextModel(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]:
        # get_dummy_inputs: seeded generator plus a random 64x64 input image
        # and a random conditioning image for the ControlNet branch.
        if str(lowerCAmelCase ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.manual_seed(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= 2
        SCREAMING_SNAKE_CASE__: Tuple= randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , )
        SCREAMING_SNAKE_CASE__: int= floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__: str= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        SCREAMING_SNAKE_CASE__: Tuple= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def UpperCamelCase_ ( self ) -> Tuple:
        # Attention-slicing output must match the unsliced forward pass.
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase_ ( self ) -> Dict:
        # xformers memory-efficient attention must match the default path.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def UpperCamelCase_ ( self ) -> str:
        # Batched inference must match single-sample inference.
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    # Fast tests for StableDiffusionControlNetImg2Img with a MultiControlNetModel
    # (two ControlNets).
    # NOTE(review): identifiers in this file are machine-mangled — values are
    # assigned to `SCREAMING_SNAKE_CASE__` but later read under other names
    # (`controlneta`, `unet`, `pipe`, ...). Verify against the upstream
    # diffusers test before executing.
    __a = StableDiffusionControlNetImgaImgPipeline  # pipeline class under test
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def UpperCamelCase_ ( self ) -> Dict:
        """Build tiny model components (UNet, 2x ControlNet, DDIM, VAE, CLIP) for fast tests."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(lowerCAmelCase ):
            # NOTE(review): the body reads `m` but the parameter is
            # `lowerCAmelCase`; taking the isinstance branch would raise
            # NameError as written. `torch.nn.Convad` is also not a real
            # torch name — presumably Conv2d. Confirm against upstream.
            if isinstance(lowerCAmelCase , torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        SCREAMING_SNAKE_CASE__: Any= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        # deterministic init of the first controlnet's down blocks
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Optional[int]= CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE__: Any= CLIPTextModel(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # both ControlNets wrapped into a single multi-controlnet module
        SCREAMING_SNAKE_CASE__: Dict= MultiControlNetModel([controlneta, controlneta] )
        SCREAMING_SNAKE_CASE__: int= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]:
        """Build deterministic dummy inputs: prompt, init image and one control image per ControlNet."""
        # MPS does not support device-local Generators; fall back to the global seed.
        if str(lowerCAmelCase ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE__: str= torch.manual_seed(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= 2
        # two control images — one per ControlNet in the MultiControlNetModel
        SCREAMING_SNAKE_CASE__: Tuple= [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
        ]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        # NCHW tensor -> HWC array -> small RGB PIL image for the img2img input
        SCREAMING_SNAKE_CASE__: Dict= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        SCREAMING_SNAKE_CASE__: int= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def UpperCamelCase_ ( self ) -> List[Any]:
        """Outputs must differ across control_guidance_start/end configurations."""
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: str= self.pipeline_class(**lowerCAmelCase )
        pipe.to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= 10.0
        SCREAMING_SNAKE_CASE__: Any= 4
        # 1) baseline: guidance active for the whole trajectory
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: List[Any]= pipe(**lowerCAmelCase )[0]
        # 2) scalar start/end window
        SCREAMING_SNAKE_CASE__: Tuple= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: int= pipe(**lowerCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        # 3) per-controlnet start/end lists
        SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: str= pipe(**lowerCAmelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        # 4) scalar start with per-controlnet end list
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: Any= pipe(**lowerCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3

    def UpperCamelCase_ ( self ) -> int:
        # Attention slicing must match full attention within 2e-3.
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase_ ( self ) -> Dict:
        # xFormers attention must match the default path within 2e-3.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # Single input vs identical batch must agree within 2e-3.
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        """save_pretrained is not implemented for Multi-ControlNet; NotImplementedError is tolerated."""
        SCREAMING_SNAKE_CASE__: Any= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pipeline_class(**lowerCAmelCase )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(lowerCAmelCase )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    # Slow integration test: full-size canny-ControlNet img2img against a
    # reference image hosted on the Hub. Requires CUDA and network access.
    # NOTE(review): names are machine-mangled (`pipe`, `output`, `image`, ...
    # are read but the assignments target `SCREAMING_SNAKE_CASE__`).

    def UpperCamelCase_ ( self ) -> Dict:
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_ ( self ) -> Tuple:
        """Run SD1.5 + sd-controlnet-canny img2img and compare to a stored reference (max abs diff < 9e-2)."""
        SCREAMING_SNAKE_CASE__: Optional[int]= ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
        SCREAMING_SNAKE_CASE__: Tuple= StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase , controlnet=lowerCAmelCase )
        # offload submodules to CPU between forward passes to fit in VRAM
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[Any]= '''evil space-punk bird'''
        SCREAMING_SNAKE_CASE__: List[str]= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
        SCREAMING_SNAKE_CASE__: List[Any]= load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
        SCREAMING_SNAKE_CASE__: Optional[Any]= pipe(
            lowerCAmelCase , lowerCAmelCase , control_image=lowerCAmelCase , generator=lowerCAmelCase , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= output.images[0]
        assert image.shape == (512, 512, 3)
        SCREAMING_SNAKE_CASE__: str= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
        assert np.abs(expected_image - image ).max() < 9e-2
| 64 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
lowercase_ : Any = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL.
lowercase_ : Optional[int] = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class _lowerCamelCase ( UpperCamelCase_ ):
    """Configuration for the MVP encoder-decoder model.

    Fixes two defects in the previous version: every ``__init__`` parameter
    shared one name (duplicate argument names are a SyntaxError), and the body
    read names (``vocab_size`` etc.) that were never bound. Parameter names
    are restored from the positional defaults, which match them one-to-one.
    """

    __a = "mvp"
    __a = ["past_key_values"]
    __a = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self , vocab_size=50267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ) -> None:
        # Model dimensions.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        # Regularisation / activation.
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        # The original source assigns `encoder_layers` a second time here;
        # upstream stores it as num_hidden_layers.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Prompt-tuning settings.
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        # Legacy flag: old configs used `force_bos_token_to_be_generated`.
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                '''The config can simply be saved and uploaded again to be fixed.''' )
| 64 | # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowerCamelCase :
    # Immutable (pytree) state container for the Flax DDPM scheduler.
    # NOTE(review): field annotations were mangled to `__a = 42`; upstream
    # declares common / init_noise_sigma / timesteps / num_inference_steps.
    __a = 42
    # setable values
    __a = 42
    __a = 42
    __a = None
    @classmethod
    def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
        # Convenience constructor mirroring the dataclass field order.
        return cls(common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase )
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
    # Output container for the scheduler step (prev_sample + updated state).
    # NOTE(review): the field annotation was mangled to `__a = 42`.
    __a = 42
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ):
    # Flax DDPM (ancestral sampling) scheduler.
    # NOTE(review): identifiers are machine-mangled throughout this class —
    # values are assigned to `SCREAMING_SNAKE_CASE__` but read under other
    # names (`t`, `alpha_prod_t`, `variance`, ...), and `__init__`'s
    # parameters share one name (a SyntaxError). Verify against the upstream
    # FlaxDDPMScheduler before executing.
    __a = [e.name for e in FlaxKarrasDiffusionSchedulers]  # compatible schedulers
    __a = 42

    @property
    def UpperCamelCase_ ( self ) -> List[Any]:
        # Scheduler state lives in an external pytree (stateful API).
        return True

    @register_to_config
    def __init__( self , lowerCAmelCase = 1000 , lowerCAmelCase = 0.0001 , lowerCAmelCase = 0.02 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "fixed_small" , lowerCAmelCase = True , lowerCAmelCase = "epsilon" , lowerCAmelCase = jnp.floataa , ) -> Optional[int]:
        # Only dtype is stored directly; the rest is handled by @register_to_config.
        SCREAMING_SNAKE_CASE__: Optional[int]= dtype

    def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> DDPMSchedulerState:
        # Build the initial state: unit init-noise sigma and reversed train timesteps.
        if common is None:
            SCREAMING_SNAKE_CASE__: Optional[Any]= CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        SCREAMING_SNAKE_CASE__: Dict= jnp.array(1.0 , dtype=self.dtype )
        SCREAMING_SNAKE_CASE__: int= jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase , )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None ) -> jnp.ndarray:
        # DDPM does not rescale model inputs.
        return sample

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = () ) -> DDPMSchedulerState:
        # Select the discrete timesteps used during inference.
        SCREAMING_SNAKE_CASE__: str= self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        SCREAMING_SNAKE_CASE__: str= (jnp.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase , )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[str]:
        # Per-timestep variance, selected by self.config.variance_type.
        SCREAMING_SNAKE_CASE__: Tuple= state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE__: int= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        SCREAMING_SNAKE_CASE__: int= (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            SCREAMING_SNAKE_CASE__: Dict= jnp.clip(lowerCAmelCase , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            SCREAMING_SNAKE_CASE__: str= jnp.log(jnp.clip(lowerCAmelCase , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            SCREAMING_SNAKE_CASE__: Union[str, Any]= state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            SCREAMING_SNAKE_CASE__: Optional[Any]= jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # interpolate between min and max log-variance using the model output
            SCREAMING_SNAKE_CASE__: List[Any]= variance
            SCREAMING_SNAKE_CASE__: Any= state.common.betas[t]
            SCREAMING_SNAKE_CASE__: List[Any]= (predicted_variance + 1) / 2
            SCREAMING_SNAKE_CASE__: Optional[Any]= frac * max_log + (1 - frac) * min_log
        return variance

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        # One reverse-diffusion step: predict x_{t-1} from the model output at t.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= timestep
        if key is None:
            SCREAMING_SNAKE_CASE__: Optional[Any]= jax.random.PRNGKey(0 )
        # learned-variance models emit 2x channels: mean + variance halves
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= jnp.split(lowerCAmelCase , sample.shape[1] , axis=1 )
        else:
            SCREAMING_SNAKE_CASE__: Any= None
        # 1. compute alphas, betas
        SCREAMING_SNAKE_CASE__: List[Any]= state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE__: Optional[int]= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        SCREAMING_SNAKE_CASE__: Optional[int]= 1 - alpha_prod_t
        SCREAMING_SNAKE_CASE__: str= 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            SCREAMING_SNAKE_CASE__: Dict= (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            SCREAMING_SNAKE_CASE__: str= model_output
        elif self.config.prediction_type == "v_prediction":
            SCREAMING_SNAKE_CASE__: Tuple= (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            SCREAMING_SNAKE_CASE__: Any= jnp.clip(lowerCAmelCase , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE__: int= (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        SCREAMING_SNAKE_CASE__: Any= state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE__: Dict= pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            # fresh PRNG split per step; variance only added for t > 0
            SCREAMING_SNAKE_CASE__: int= jax.random.split(lowerCAmelCase , num=1 )
            SCREAMING_SNAKE_CASE__: str= jax.random.normal(lowerCAmelCase , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(lowerCAmelCase , lowerCAmelCase , predicted_variance=lowerCAmelCase ) ** 0.5) * noise
        SCREAMING_SNAKE_CASE__: Union[str, Any]= jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        SCREAMING_SNAKE_CASE__: Optional[int]= pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase , state=lowerCAmelCase )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
        # Forward-process noising q(x_t | x_0), shared helper.
        return add_noise_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
        # Velocity target for v-prediction training, shared helper.
        return get_velocity_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )

    def __len__( self ) -> Tuple:
        # Number of training timesteps.
        return self.config.num_train_timesteps
| 64 | 1 |
from ..utils import DummyObject, requires_backends
class _lowerCamelCase ( metaclass=UpperCamelCase_ ):
    """Placeholder object that raises an informative error when the `onnx`
    backend is not installed, instead of failing with an ImportError at
    import time."""

    __a = ["onnx"]

    def __init__( self , *args , **kwargs ) -> Optional[Any]:
        # Fail fast on instantiation unless `onnx` is importable.
        requires_backends(self , ['''onnx'''] )

    @classmethod
    def UpperCamelCase_ ( cls , *args , **kwargs ) -> Optional[int]:
        # Class-level constructor entry point; guarded the same way.
        requires_backends(cls , ['''onnx'''] )

    @classmethod
    def UpperCamelCase_ ( cls , *args , **kwargs ) -> List[str]:
        # Second class-level entry point; guarded the same way.
        requires_backends(cls , ['''onnx'''] )
def A__ ( snake_case_ : int ) -> list:
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Fixes the previous version, which read `upper_limit` and `catalan_list`
    without ever binding them (NameError) and ranged the inner loop over the
    limit instead of the current index.

    :param snake_case_: inclusive upper limit of the sequence (must be >= 0)
    :raises ValueError: if the limit is negative
    """
    upper_limit = snake_case_
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i - 1
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
    # Interactive demo loop: repeatedly prompt for an upper limit and print
    # the Catalan sequence. Any non-numeric input falls into the except arm.
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            # NOTE(review): the input is bound to `lowercase_` but read as `N`,
            # and `catalan_numbers` is not defined under that name in this
            # file (the function above is `A__`) — mangled names; the NameError
            # is silently caught by the except clause below. Verify.
            lowercase_ : Any = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(f'''The Catalan numbers from 0 through {N} are:''')
                print(catalan_numbers(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')
    import doctest
    doctest.testmod()
| 64 | 1 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def A__ ( img : np.ndarray , pts1 : np.ndarray , pts2 : np.ndarray , rows : int , cols : int ) -> np.ndarray:
    """Apply the affine transform mapping `pts1` onto `pts2` to `img`.

    Fixes the previous version, in which all five parameters shared the name
    `snake_case_` (duplicate argument names are a SyntaxError) and `rows` /
    `cols` were read without being bound.

    :param img: source image
    :param pts1: three source points, float32, shape (3, 2)
    :param pts2: three destination points, float32, shape (3, 2)
    :param rows: output height in pixels
    :param cols: output width in pixels
    :return: the warped image of size (rows, cols)
    """
    # 2x3 affine matrix from the three point correspondences.
    matrix = cva.getAffineTransform(pts1 , pts2 )
    return cva.warpAffine(img , matrix , (rows, cols) )
if __name__ == "__main__":
    # Demo: load lena.jpg, apply three affine "rotations", plot a 2x2 grid.
    # NOTE(review): names are machine-mangled — the loads below are assigned
    # to `lowercase_` but later read as `image` / `gray_img` / `ptsa` /
    # `images` / `titles`, none of which are bound. Verify before running.
    # read original image
    lowercase_ : Any = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    lowercase_ : Union[str, Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    lowercase_ , lowercase_ : int = gray_img.shape
    # set different points to rotate image
    lowercase_ : List[str] = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
    lowercase_ : str = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
    lowercase_ : List[str] = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
    lowercase_ : Optional[Any] = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
    # add all rotated images in a list
    lowercase_ : Dict = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    lowercase_ : Union[str, Any] = plt.figure(1)
    lowercase_ : Dict = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 64 | from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 64 | 1 |
def A__ ( first : int , second : int ) -> int:
    """Add two non-negative integers using only bitwise operations.

    Fixes the previous version, which read an unbound name `c` (NameError)
    and never reassigned `second`, so the loop could not terminate.

    :param first: first addend
    :param second: second addend
    :return: first + second
    """
    while second != 0:
        # carry holds the common set bits of both numbers
        carry = first & second
        # XOR adds the bits without the carry
        first ^= second
        # shift the carry left so it is applied on the next iteration
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive demo: read two integers and print their bitwise sum.
    # NOTE(review): the inputs are bound to `lowercase_` but the print reads
    # `add(first, second)` — none of those names are defined in this file
    # (the function above is `A__`). Mangled names; verify before running.
    lowercase_ : Union[str, Any] = int(input('Enter the first number: ').strip())
    lowercase_ : List[str] = int(input('Enter the second number: ').strip())
    print(f'''{add(first, second) = }''')
| 64 | import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def A__ ( snake_case_ : List[Any] ):
SCREAMING_SNAKE_CASE__: str= torch.load(snake_case_ , map_location='''cpu''' )
if "model" in sd.keys():
SCREAMING_SNAKE_CASE__: Any= torch.load(snake_case_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
SCREAMING_SNAKE_CASE__: List[str]= [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
SCREAMING_SNAKE_CASE__: str= {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE__: Union[str, Any]= sd.pop(snake_case_ )
SCREAMING_SNAKE_CASE__: int= list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE__: int= sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE__: Optional[Any]= key.replace('''.qkv_proj.''' , '''.q_proj.''' )
SCREAMING_SNAKE_CASE__: Optional[int]= key.replace('''.qkv_proj.''' , '''.k_proj.''' )
SCREAMING_SNAKE_CASE__: List[str]= key.replace('''.qkv_proj.''' , '''.v_proj.''' )
SCREAMING_SNAKE_CASE__: Optional[int]= value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= torch.split(snake_case_ , depth // 3 , dim=0 )
SCREAMING_SNAKE_CASE__: List[Any]= q
SCREAMING_SNAKE_CASE__: Any= k
SCREAMING_SNAKE_CASE__: Optional[Any]= v
del sd[key]
return sd
@torch.no_grad()
def A__ ( checkpoint_path , pytorch_dump_folder_path , config=None ):
    """Convert a metaseq OPT checkpoint into a half-precision HF OPTModel and save it.

    Fixes the previous version, in which all three parameters shared the name
    `snake_case_` (duplicate argument names are a SyntaxError) and
    `Path(...).mkdir` received a mangled name for `exist_ok`.

    NOTE(review): `load_checkpoint` is not defined under that name in this
    file (the loader above is also called `A__`) — mangled names, confirm
    the intended helper before running.

    :param checkpoint_path: path to the fairseq/metaseq checkpoint
    :param pytorch_dump_folder_path: output directory for the HF model
    :param config: optional path/name of an OPTConfig to load
    """
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point for the OPT checkpoint conversion.
    # NOTE(review): the parser and parsed args are bound to `lowercase_` but
    # read as `parser` / `args`, and `convert_opt_checkpoint` is not defined
    # under that name in this file — mangled names, verify before running.
    lowercase_ : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    lowercase_ : int = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 64 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def A__ ( snake_case_ : Any ):
    """Resize a PIL image to side lengths that are multiples of 32 and convert
    it to a [-1, 1] NCHW float tensor.

    Fixes the previous version, which read `w`, `h` and `image` without ever
    binding them (NameError) and used `np.floataa`, which is not a NumPy name
    (restored to ``np.float32``).

    :param snake_case_: input PIL image
    :return: torch tensor of shape (1, 3, H, W) with values in [-1, 1]
    """
    image = snake_case_
    w, h = image.size
    # round each side down to the nearest multiple of 32
    w, h = (x - x % 32 for x in (w, h))
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 2_55.0
    # HWC -> NCHW with a leading batch dimension
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    # scale [0, 1] -> [-1, 1]
    return 2.0 * image - 1.0
class _lowerCamelCase ( UpperCamelCase_ ):
    # Latent-diffusion super-resolution pipeline (VQVAE + UNet + scheduler).
    # NOTE(review): identifiers are machine-mangled — `__call__`'s parameters
    # all share one name (duplicate argument names are a SyntaxError) and
    # several names (`image`, `latents`, `timesteps_tensor`, ...) are read
    # without a visible binding. Verify against the upstream
    # LDMSuperResolutionPipeline before executing.

    def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> List[Any]:
        # Register vqvae / unet / scheduler so they participate in
        # save_pretrained, device moves, etc.
        super().__init__()
        self.register_modules(vqvae=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase )

    @torch.no_grad()
    def __call__( self , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = 100 , lowerCAmelCase = 0.0 , lowerCAmelCase = None , lowerCAmelCase = "pil" , lowerCAmelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
        # Accept a single PIL image or a batched tensor.
        if isinstance(lowerCAmelCase , PIL.Image.Image ):
            SCREAMING_SNAKE_CASE__: str= 1
        elif isinstance(lowerCAmelCase , torch.Tensor ):
            SCREAMING_SNAKE_CASE__: int= image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCAmelCase )}' )
        if isinstance(lowerCAmelCase , PIL.Image.Image ):
            SCREAMING_SNAKE_CASE__: Union[str, Any]= preprocess(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        SCREAMING_SNAKE_CASE__: Union[str, Any]= (batch_size, self.unet.config.in_channels // 2, height, width)
        SCREAMING_SNAKE_CASE__: Optional[Any]= next(self.unet.parameters() ).dtype
        SCREAMING_SNAKE_CASE__: Optional[int]= randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[Any]= image.to(device=self.device , dtype=lowerCAmelCase )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(lowerCAmelCase , device=self.device )
        SCREAMING_SNAKE_CASE__: Any= self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        SCREAMING_SNAKE_CASE__: List[Any]= latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        SCREAMING_SNAKE_CASE__: Dict= '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        SCREAMING_SNAKE_CASE__: str= {}
        if accepts_eta:
            SCREAMING_SNAKE_CASE__: List[Any]= eta
        for t in self.progress_bar(lowerCAmelCase ):
            # concat latents and low resolution image in the channel dimension.
            SCREAMING_SNAKE_CASE__: Optional[Any]= torch.cat([latents, image] , dim=1 )
            SCREAMING_SNAKE_CASE__: Optional[Any]= self.scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
            # predict the noise residual
            SCREAMING_SNAKE_CASE__: Dict= self.unet(lowerCAmelCase , lowerCAmelCase ).sample
            # compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE__: Any= self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
        # decode the image latents with the VQVAE
        SCREAMING_SNAKE_CASE__: Tuple= self.vqvae.decode(lowerCAmelCase ).sample
        SCREAMING_SNAKE_CASE__: Optional[int]= torch.clamp(lowerCAmelCase , -1.0 , 1.0 )
        # [-1, 1] -> [0, 1], then NCHW -> NHWC numpy
        SCREAMING_SNAKE_CASE__: Optional[Any]= image / 2 + 0.5
        SCREAMING_SNAKE_CASE__: List[Any]= image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            SCREAMING_SNAKE_CASE__: Tuple= self.numpy_to_pil(lowerCAmelCase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowerCAmelCase )
def A__ ( density : float , bulk_modulus : float ) -> float:
    """Speed of sound in a fluid: c = sqrt(K / rho) (Newton-Laplace equation).

    Fixes the previous version, in which both parameters shared the name
    `snake_case_` (duplicate argument names are a SyntaxError) and the body
    read `density` / `bulk_modulus` without binding them.

    :param density: fluid density in kg/m^3 (must be > 0)
    :param bulk_modulus: bulk modulus in Pa (must be > 0)
    :raises ValueError: on non-positive density or bulk modulus
    :return: speed of sound in m/s
    """
    if density <= 0:
        raise ValueError('''Impossible fluid density''' )
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''' )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 64 | 1 |
def A__ ( snake_case_ : int | float | str ):
try:
SCREAMING_SNAKE_CASE__: Optional[Any]= float(snake_case_ )
except ValueError:
raise ValueError('''Please enter a valid number''' )
SCREAMING_SNAKE_CASE__: Tuple= decimal - int(snake_case_ )
if fractional_part == 0:
return int(snake_case_ ), 1
else:
SCREAMING_SNAKE_CASE__: Optional[int]= len(str(snake_case_ ).split('''.''' )[1] )
SCREAMING_SNAKE_CASE__: Optional[Any]= int(decimal * (10**number_of_frac_digits) )
SCREAMING_SNAKE_CASE__: List[Any]= 10**number_of_frac_digits
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[Any]= denominator, numerator
while True:
SCREAMING_SNAKE_CASE__: Union[str, Any]= dividend % divisor
if remainder == 0:
break
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[Any]= divisor, remainder
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: int= numerator / divisor, denominator / divisor
return int(snake_case_ ), int(snake_case_ )
if __name__ == "__main__":
    # Demo prints exercising the converter on each supported input type.
    # NOTE(review): `decimal_to_fraction` is not defined under that name in
    # this file (the function above is `A__`) — mangled names; the last call
    # also raises ValueError by design. Verify before running.
    print(f'''{decimal_to_fraction(2) = }''')
    print(f'''{decimal_to_fraction(89.0) = }''')
    print(f'''{decimal_to_fraction("67") = }''')
    print(f'''{decimal_to_fraction("45.0") = }''')
    print(f'''{decimal_to_fraction(1.5) = }''')
    print(f'''{decimal_to_fraction("6.25") = }''')
    print(f'''{decimal_to_fraction("78td") = }''')
| 64 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy-import structure for BLIP-2: submodule name -> exported public names.
# NOTE(review): this dict is bound to `lowercase_`, yet `_LazyModule` below is
# called with `_import_structure`, a name never defined in this module —
# presumably both should be `_import_structure`; confirm against upstream.
lowercase_ : Any = {
    'configuration_blip_2': [
        'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Blip2Config',
        'Blip2QFormerConfig',
        'Blip2VisionConfig',
    ],
    'processing_blip_2': ['Blip2Processor'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: the modeling classes simply are not exported.
    pass
else:
    # NOTE(review): this rebinds `lowercase_` (clobbering the dict above)
    # instead of registering the list under a 'modeling_blip_2' key.
    lowercase_ : int = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports directly.
    from .configuration_blip_a import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlipaConfig,
        BlipaQFormerConfig,
        BlipaVisionConfig,
    )
    from .processing_blip_a import BlipaProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_a import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipaForConditionalGeneration,
            BlipaModel,
            BlipaPreTrainedModel,
            BlipaQFormerModel,
            BlipaVisionModel,
        )
else:
    # At runtime the module object is replaced by a lazy loader.
    import sys
    lowercase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module-level logger for this file.
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
    """Enum-style names of the supported learning-rate scheduler types.

    NOTE(review): every member below is assigned to the same attribute `__a`,
    so only the last value survives — upstream these are distinct members
    (LINEAR, COSINE, ...); confirm against the original file.
    """

    __a = "linear"
    __a = "cosine"
    __a = "cosine_with_restarts"
    __a = "polynomial"
    __a = "constant"
    __a = "constant_with_warmup"
    __a = "piecewise_constant"
def A__ ( optimizer : Optimizer , last_epoch : int = -1 ):
    """Create a schedule with a constant learning rate.

    The original definition reused `snake_case_` for both parameters (a
    SyntaxError); they are renamed here to restore a valid signature.

    Args:
        optimizer: the wrapped optimizer.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with a constant multiplier of 1.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def A__ ( optimizer : Optimizer , num_warmup_steps : int , last_epoch : int = -1 ):
    """Constant schedule preceded by a linear warmup phase.

    The original declared the same parameter name three times (a SyntaxError)
    and the inner lambda referenced names that were never bound; restored.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: number of steps over which the LR ramps from 0 to 1x.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the warmup-then-constant rule.
    """

    def lr_lambda(current_step: int):
        # Ramp linearly from 0 to 1 during warmup, then hold at 1.
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def A__ ( optimizer : Optimizer , step_rules : str , last_epoch : int = -1 ):
    """Piecewise-constant schedule described by a rule string.

    `step_rules` looks like ``"5:0.5,10:0.1,0.01"``: multiplier 0.5 until step
    5, 0.1 until step 10, then 0.01 for the rest of training.

    The original signature duplicated `snake_case_` (a SyntaxError) and the
    tuple-unpack / dict-write targets were lost; both restored.
    """
    rules_dict = {}
    rule_list = step_rules.split(''',''')
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(''':''')
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    # The trailing bare number applies after the last boundary.
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def A__ ( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    """Linear warmup followed by linear decay to 0 at `num_training_steps`.

    The original declared four parameters all named `snake_case_` (a
    SyntaxError); the names the body actually closes over are restored.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: steps over which the LR ramps from 0 to 1x.
        num_training_steps: total steps; the LR reaches 0 here.
        last_epoch: index of the last epoch when resuming training.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Decay linearly to 0, clamped so the multiplier never goes negative.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def A__ ( optimizer : Optimizer , num_warmup_steps : int , num_training_steps : int , num_cycles : float = 0.5 , last_epoch : int = -1 ):
    """Linear warmup followed by cosine decay.

    The original duplicated the parameter name `snake_case_` (a SyntaxError)
    and dropped the `num_cycles` factor inside the cosine; restored.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: steps over which the LR ramps from 0 to 1x.
        num_training_steps: total number of training steps.
        num_cycles: number of cosine half-waves (0.5 = decay 1 -> 0).
        last_epoch: index of the last epoch when resuming training.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def A__ ( optimizer : Optimizer , num_warmup_steps : int , num_training_steps : int , num_cycles : int = 1 , last_epoch : int = -1 ):
    """Linear warmup then cosine decay with `num_cycles` hard restarts.

    The original duplicated the parameter name `snake_case_` (a SyntaxError)
    and lost the `num_cycles` factor inside the cosine; restored.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: steps over which the LR ramps from 0 to 1x.
        num_training_steps: total number of training steps.
        num_cycles: number of hard restarts of the cosine wave.
        last_epoch: index of the last epoch when resuming training.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # `% 1.0` restarts the cosine wave `num_cycles` times over training.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def A__ ( optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ):
    """Linear warmup then polynomial decay from the optimizer's base LR to `lr_end`.

    The original declared several parameters all named `snake_case_` (a
    SyntaxError); the names the body closes over are restored. The error
    message's doubled "be be" typo is also fixed.

    Args:
        optimizer: the wrapped optimizer (its `defaults['lr']` is the start LR).
        num_warmup_steps: steps over which the LR ramps from 0 to 1x.
        num_training_steps: total number of training steps.
        lr_end: final absolute learning rate after decay.
        power: exponent of the polynomial decay.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if `lr_end` is not strictly below the initial LR.
    """
    lr_init = optimizer.defaults['''lr''']
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})')

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Maps each scheduler type to its factory function.
# NOTE(review): none of the names referenced here (SchedulerType,
# get_linear_schedule_with_warmup, ...) is defined in this file — the enum is
# named `_lowerCamelCase` and every factory was renamed to `A__` — so
# evaluating this dict raises NameError; restore the original names to fix.
lowercase_ : Tuple = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def A__ ( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    """Unified scheduler factory: look up `name` and forward the arguments it needs.

    The original declared several parameters all named `snake_case_` (a
    SyntaxError); the names the body actually uses are restored.

    Args:
        name: a `SchedulerType` or its string value.
        optimizer: the wrapped optimizer.
        step_rules: rule string for the piecewise-constant schedule.
        num_warmup_steps: warmup steps (required by warmup schedules).
        num_training_steps: total steps (required by decaying schedules).
        num_cycles: cycles for the cosine-with-restarts schedule.
        power: exponent for the polynomial schedule.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if a required argument for the chosen schedule is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.')
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.')
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch )
| 64 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_ )
class _lowerCamelCase ( UpperCamelCase_ ):
    """Task template describing the column layout of a text-classification dataset.

    NOTE(review): `UpperCamelCase_` is used both as the `frozen=` flag and as
    the base class — upstream this is `@dataclass(frozen=True)` with base
    `TaskTemplate`; and the field names below all collapsed to `__a`.
    Several method locals were also obfuscated (`features`, `task_template`
    are read but never bound). Confirm against the original before relying
    on this file.
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    __a = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    # Input schema, label schema, and the dataset columns they come from.
    __a = Features({"text": Value("string" )} )
    __a = Features({"labels": ClassLabel} )
    __a = "text"
    __a = "labels"

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple:
        # Validate the label column and return a copy of this template with
        # the dataset's concrete ClassLabel feature substituted in.
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , lowerCAmelCase ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= copy.deepcopy(self )
        SCREAMING_SNAKE_CASE__: Tuple= self.label_schema.copy()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= features[self.label_column]
        SCREAMING_SNAKE_CASE__: List[str]= label_schema
        return task_template

    @property
    def UpperCamelCase_ ( self ) -> Dict[str, str]:
        # Mapping from the dataset's column names to the template's
        # standardized names.
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 64 | 1 |
def A__ ( a : str , b : str ):
    """Return True if string `a` can be abbreviated to string `b`.

    You may uppercase any subset of lowercase letters of `a`; every remaining
    lowercase letter must then be deleted. `b` is assumed upper-case.

    The original declared both parameters as `snake_case_` (a SyntaxError)
    and the DP-table index assignments were lost; both are restored here.

    Args:
        a: the source string (mixed case).
        b: the target abbreviation.

    Returns:
        bool: whether `a` can be transformed into `b`.
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: can the prefix a[:i] be abbreviated to the prefix b[:j]?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    # Consume a[i] (uppercased if necessary) to match b[j].
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    # Lowercase letters may simply be deleted.
                    dp[i + 1][j] = True
    return dp[n][m]
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 64 | import inspect
import unittest
class _lowerCamelCase ( unittest.TestCase ):
    """Sanity checks for the diffusers pinned-dependency table.

    NOTE(review): both methods share the name `UpperCamelCase_`, so the first
    (the import smoke test) is shadowed by the second; and results are bound
    to the throwaway name `SCREAMING_SNAKE_CASE__` while later lines read the
    real names (`all_classes`, `backend`) — upstream each assignment has a
    distinct target. Confirm against the original file.
    """

    def UpperCamelCase_ ( self ) -> Any:
        # Smoke test: diffusers must be importable at all.
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def UpperCamelCase_ ( self ) -> List[str]:
        # Every backend referenced by a dummy object must appear in the
        # pinned dependency table (with PyPI-name fixups applied).
        import diffusers
        from diffusers.dependency_versions_table import deps

        # NOTE(review): `lowerCAmelCase` is undefined here — upstream passes
        # the `diffusers` module itself to inspect.getmembers.
        SCREAMING_SNAKE_CASE__: Tuple= inspect.getmembers(lowerCAmelCase , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        # PyPI distribution name differs from the import name.
                        SCREAMING_SNAKE_CASE__: Optional[int]= '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        SCREAMING_SNAKE_CASE__: int= '''invisible-watermark'''
                    assert backend in deps, f'{backend} is not in the deps table!'
| 64 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _lowerCamelCase ( datasets.BeamBasedBuilder ):
    """Test-only Beam builder: one TRAIN split of flat string examples.

    NOTE(review): method names were obfuscated to `UpperCamelCase_`
    (upstream: _info / _split_generators / _build_pcollection) and
    `lowerCAmelCase` / `pipeline` stand in for the real parameters; the tests
    below refer to this class as `DummyBeamDataset`.
    """

    def UpperCamelCase_ ( self ) -> Optional[int]:
        # Dataset schema: a single string column called "content".
        return datasets.DatasetInfo(
            features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=lowerCAmelCase , )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> Dict:
        # A single TRAIN split fed from the in-memory dummy examples.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> str:
        import apache_beam as beam

        # Materialize the given examples into the Beam pipeline.
        return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase )
class _lowerCamelCase ( datasets.BeamBasedBuilder ):
    """Test-only Beam builder with a nested feature schema ({"a": {"b": [...]}}).

    NOTE(review): same obfuscation caveats as the builder above; the tests
    below refer to this class as `NestedBeamDataset`.
    """

    def UpperCamelCase_ ( self ) -> List[str]:
        # Dataset schema: a sequence of {"b": string} records under key "a".
        return datasets.DatasetInfo(
            features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=lowerCAmelCase , )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
        # A single TRAIN split fed from the in-memory nested examples.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
        ]

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
        import apache_beam as beam

        # Materialize the given examples into the Beam pipeline.
        return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase )
def A__ ( ):
    """Produce the flat dummy examples as (index, {"content": text}) pairs."""
    examples = []
    for index, text in enumerate(['''foo''', '''bar''', '''foobar''']):
        examples.append((index, {"content": text}))
    return examples
def A__ ( ):
    """Produce the nested dummy examples: (index, {"a": {"b": [text]}}) pairs."""
    examples = []
    for index, text in enumerate(['''foo''', '''bar''', '''foobar''']):
        examples.append((index, {"a": {"b": [text]}}))
    return examples
class _lowerCamelCase ( UpperCamelCase_ ):
    """End-to-end tests of the Beam-based dataset builders above.

    NOTE(review): all four test methods share the name `UpperCamelCase_`
    (so only the last survives), and results are assigned to the throwaway
    name `SCREAMING_SNAKE_CASE__` while later lines read the real names
    (`builder`, `dset`, `expected_num_examples`, ...). `lowerCAmelCase` and
    `DummyBeamDataset` / `NestedBeamDataset` are likewise obfuscation
    placeholders — compare with the upstream test file before running.
    """

    @require_beam
    def UpperCamelCase_ ( self ) -> str:
        # Build the flat dataset with the DirectRunner and check its on-disk
        # artifacts, schema, row count, and first/last example contents.
        SCREAMING_SNAKE_CASE__: Optional[int]= len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            SCREAMING_SNAKE_CASE__: str= DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            SCREAMING_SNAKE_CASE__: str= builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
            self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def UpperCamelCase_ ( self ) -> int:
        # Same as above but with parquet writing patched to produce 2 shards,
        # verifying sharded output files and (order-insensitive) contents.
        import apache_beam as beam

        SCREAMING_SNAKE_CASE__: Tuple= beam.io.parquetio.WriteToParquet
        SCREAMING_SNAKE_CASE__: List[Any]= len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            SCREAMING_SNAKE_CASE__: str= DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
            with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
                SCREAMING_SNAKE_CASE__: Union[str, Any]= partial(lowerCAmelCase , num_shards=2 )
                builder.download_and_prepare()
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
                # NOTE(review): this repeats shard 00000 — upstream checks
                # shard 00001 here; likely a copy/paste slip.
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
                self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
                SCREAMING_SNAKE_CASE__: Optional[Any]= builder.as_dataset()
                self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
                self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
                # Order is not preserved when sharding, so we just check that all the elements are there
                self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
                self.assertTrue(
                    os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
                del dset

    @require_beam
    def UpperCamelCase_ ( self ) -> List[str]:
        # Preparing without a beam_runner must raise MissingBeamOptions.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            SCREAMING_SNAKE_CASE__: List[str]= DummyBeamDataset(cache_dir=lowerCAmelCase )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def UpperCamelCase_ ( self ) -> List[str]:
        # Same end-to-end check for the nested-schema builder.
        SCREAMING_SNAKE_CASE__: int= len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            SCREAMING_SNAKE_CASE__: Tuple= NestedBeamDataset(cache_dir=lowerCAmelCase , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
            SCREAMING_SNAKE_CASE__: List[str]= builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , lowerCAmelCase )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , lowerCAmelCase )
            self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(lowerCAmelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset
| 64 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
# Runs only during SageMaker release testing: requires the TEST_SAGEMAKER env
# flag and the `sm_env` fixture that provisions the SageMaker session.
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ] )
class _lowerCamelCase ( unittest.TestCase ):
    """End-to-end SageMaker model-parallel GLUE training test.

    NOTE(review): all methods share the obfuscated name `UpperCamelCase_`
    (upstream: setUp / create_estimator / save_results_as_csv / test_*), and
    several assignments target the throwaway `SCREAMING_SNAKE_CASE__` while
    later lines read the real names (`smp_options`, `mpi_options`,
    `estimator`, `train_runtime`, ...). Compare with upstream before running.
    """

    def UpperCamelCase_ ( self ) -> Any:
        # Copy the GLUE training script into the SageMaker test workspace.
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=lowerCAmelCase , )
        assert hasattr(self , '''env''' )

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple:
        # configuration for running training on smdistributed Model Parallel
        SCREAMING_SNAKE_CASE__: Optional[Any]= {
            '''enabled''': True,
            '''processes_per_host''': 8,
        }
        SCREAMING_SNAKE_CASE__: Dict= {
            '''enabled''': True,
            '''parameters''': {
                '''microbatches''': 4,
                '''placement_strategy''': '''spread''',
                '''pipeline''': '''interleaved''',
                '''optimize''': '''speed''',
                '''partitions''': 4,
                '''ddp''': True,
            },
        }
        SCREAMING_SNAKE_CASE__: Optional[Any]= {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
        SCREAMING_SNAKE_CASE__: Dict= '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase , hyperparameters={
                **self.env.hyperparameters,
                '''model_name_or_path''': self.model_name_or_path,
                '''max_steps''': 500,
            } , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase , py_version='''py36''' , )

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
        # Export the job's CloudWatch metrics to a CSV next to the test files.
        TrainingJobAnalytics(lowerCAmelCase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )

    @parameterized.expand([(1,)] )
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
        # create estimator
        SCREAMING_SNAKE_CASE__: List[str]= self.create_estimator(lowerCAmelCase )
        # run training
        estimator.fit()
        # result dataframe
        SCREAMING_SNAKE_CASE__: Any= TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        SCREAMING_SNAKE_CASE__: List[Any]= (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCAmelCase )
| 64 | 1 |
from __future__ import annotations
from typing import Any
class _lowerCamelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0 ) -> None:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[Any]= row, column
SCREAMING_SNAKE_CASE__: int= [[default_value for c in range(lowerCAmelCase )] for r in range(lowerCAmelCase )]
def __str__( self ) -> str:
SCREAMING_SNAKE_CASE__: Union[str, Any]= f'Matrix consist of {self.row} rows and {self.column} columns\n'
# Make string identifier
SCREAMING_SNAKE_CASE__: List[str]= 0
for row_vector in self.array:
for obj in row_vector:
SCREAMING_SNAKE_CASE__: str= max(lowerCAmelCase , len(str(lowerCAmelCase ) ) )
SCREAMING_SNAKE_CASE__: Union[str, Any]= f'%{max_element_length}s'
# Make string and return
def single_line(lowerCAmelCase ) -> str:
nonlocal string_format_identifier
SCREAMING_SNAKE_CASE__: List[str]= '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowerCAmelCase ) for row_vector in self.array )
return s
def __repr__( self ) -> str:
return str(self )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> bool:
if not (isinstance(lowerCAmelCase , (list, tuple) ) and len(lowerCAmelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , lowerCAmelCase ) -> Any:
assert self.validate_indicies(lowerCAmelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self , lowerCAmelCase , lowerCAmelCase ) -> None:
assert self.validate_indicies(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= value
def __add__( self , lowerCAmelCase ) -> Matrix:
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert self.row == another.row and self.column == another.column
# Add
SCREAMING_SNAKE_CASE__: Union[str, Any]= Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE__: Optional[int]= self[r, c] + another[r, c]
return result
def __neg__( self ) -> Matrix:
SCREAMING_SNAKE_CASE__: Union[str, Any]= Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE__: str= -self[r, c]
return result
def __sub__( self , lowerCAmelCase ) -> Matrix:
return self + (-another)
def __mul__( self , lowerCAmelCase ) -> Matrix:
if isinstance(lowerCAmelCase , (int, float) ): # Scalar multiplication
SCREAMING_SNAKE_CASE__: int= Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE__: Any= self[r, c] * another
return result
elif isinstance(lowerCAmelCase , lowerCAmelCase ): # Matrix multiplication
assert self.column == another.row
SCREAMING_SNAKE_CASE__: Optional[int]= Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
SCREAMING_SNAKE_CASE__: Tuple= f'Unsupported type given for another ({type(lowerCAmelCase )})'
raise TypeError(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Matrix:
SCREAMING_SNAKE_CASE__: List[str]= Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= self[r, c]
return result
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> Any:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and isinstance(lowerCAmelCase , lowerCAmelCase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
SCREAMING_SNAKE_CASE__: Dict= v.transpose()
SCREAMING_SNAKE_CASE__: Tuple= (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Exercise the matrix class: identity inverse, u/v column vectors,
        and the Sherman–Morrison update. (The original referenced undefined
        names `Matrix`/`snake_case_` and dropped all element assignments.)"""
        # a^(-1)
        ainv = _lowerCamelCase(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}')
        # u, v
        u = _lowerCamelCase(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = _lowerCamelCase(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}')
        print(f'v is {v}')
        print(f'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}')

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 64 | import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    """Nightly GPU regression test for the legacy ONNX Stable Diffusion
    inpainting pipeline.

    NOTE(review): the two properties and the test method all share the
    obfuscated name `UpperCamelCase_`, and several assignments target the
    throwaway `SCREAMING_SNAKE_CASE__` while later lines read the real names
    (`options`, `pipe`, `output`, ...); `lowerCAmelCase` is likewise a
    placeholder. Compare with the upstream test before running.
    """

    @property
    def UpperCamelCase_ ( self ) -> List[str]:
        # ONNX Runtime CUDA execution provider with a capped memory arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def UpperCamelCase_ ( self ) -> List[Any]:
        # Session options (upstream disables memory-pattern optimization).
        SCREAMING_SNAKE_CASE__: Dict= ort.SessionOptions()
        SCREAMING_SNAKE_CASE__: List[str]= False
        return options

    def UpperCamelCase_ ( self ) -> int:
        # Reference inputs and expected output hosted on the HF Hub.
        SCREAMING_SNAKE_CASE__: Dict= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        SCREAMING_SNAKE_CASE__: int= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        SCREAMING_SNAKE_CASE__: Tuple= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
        # using the PNDM scheduler by default
        SCREAMING_SNAKE_CASE__: Tuple= OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= '''A red cat sitting on a park bench'''
        SCREAMING_SNAKE_CASE__: Optional[Any]= np.random.RandomState(0 )
        SCREAMING_SNAKE_CASE__: Any= pipe(
            prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowerCAmelCase , output_type='''np''' , )
        SCREAMING_SNAKE_CASE__: Any= output.images[0]
        # Output must be a 512x512 RGB image matching the stored reference.
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
| 64 | 1 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys

# NOTE(review): `os` is imported but never used and this bare '3' is bound to
# a throwaway name — upstream this line sets
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" to silence TensorFlow logging.
lowercase_ : Optional[int] = '3'
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
# Torch details are optional: report None when torch is not installed.
try:
    import torch
    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
    print('Torch version:', None)
# Same for transformers.
try:
    import transformers
    print('transformers version:', transformers.__version__)
except ImportError:
    print('transformers version:', None)
| 64 | from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
# Module-level logger for this file.
lowercase_ : List[Any] = logging.get_logger(__name__)
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
    """PyTorch-specific benchmark arguments.

    NOTE(review): field names were obfuscated to `__a` (so only the last
    declaration of each run survives) and several assignments target the
    throwaway `SCREAMING_SNAKE_CASE__` while later lines read real names
    (`kwargs`, `positive_arg`, `device`, `n_gpu`); compare with
    `transformers.PyTorchBenchmarkArguments` before relying on this file.
    """

    # Legacy negated CLI flags that map onto the new positive arguments.
    __a = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__( self , **lowerCAmelCase ) -> str:
        # Translate each deprecated `no_*` kwarg into its positive
        # counterpart, warning the caller about the deprecation, then pop the
        # PyTorch-only options before delegating to the base dataclass init.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                SCREAMING_SNAKE_CASE__: str= deprecated_arg[3:]
                setattr(self , lowerCAmelCase , not kwargs.pop(lowerCAmelCase ) )
                logger.warning(
                    f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}' )
        SCREAMING_SNAKE_CASE__: Tuple= kwargs.pop('''torchscript''' , self.torchscript )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        SCREAMING_SNAKE_CASE__: Any= kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
        super().__init__(**lowerCAmelCase )

    __a = field(default=UpperCamelCase_ , metadata={"help": "Trace the models using torchscript"} )
    __a = field(default=UpperCamelCase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
    __a = field(
        default="O1" , metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        } , )

    @cached_property
    def UpperCamelCase_ ( self ) -> Tuple["torch.device", int]:
        # Pick the torch device (CPU / TPU / CUDA) and count visible GPUs.
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            SCREAMING_SNAKE_CASE__: Any= torch.device('''cpu''' )
            SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
        elif is_torch_tpu_available():
            SCREAMING_SNAKE_CASE__: List[str]= xm.xla_device()
            SCREAMING_SNAKE_CASE__: Any= 0
        else:
            SCREAMING_SNAKE_CASE__: List[Any]= torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            SCREAMING_SNAKE_CASE__: List[str]= torch.cuda.device_count()
        return device, n_gpu

    @property
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Whether a TPU is both available and enabled.
        return is_torch_tpu_available() and self.tpu

    @property
    def UpperCamelCase_ ( self ) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def UpperCamelCase_ ( self ) -> "torch.device":
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]

    @property
    def UpperCamelCase_ ( self ) -> int:
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]

    @property
    def UpperCamelCase_ ( self ) -> str:
        # True when at least one GPU is usable.
        return self.n_gpu > 0
| 64 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
# Shared RNG so synthetic audio data is generated consistently.
# NOTE(review): the function below expects this under the name `global_rng`.
lowercase_ : Union[str, Any] = random.Random()
def A__ ( shape , scale=1.0 , rng=None , name=None ):
    """Create a `shape[0] x shape[1]` nested list of random floats in [0, scale).

    The original declared four parameters all named `snake_case_` (a
    SyntaxError) and referenced an undefined `global_rng`; the signature is
    restored and the fallback uses the stdlib `random` module (imported at the
    top of this file) so the function works standalone.

    Args:
        shape: (rows, cols) of the generated grid.
        scale: multiplier applied to each random sample.
        rng: optional random source with a `.random()` method.
        name: unused; kept for call-site compatibility.
    """
    if rng is None:
        rng = random
    values = []
    for _batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class _lowerCamelCase ( unittest.TestCase ):
    """Holds Whisper feature-extractor hyper-parameters and produces synthetic
    speech inputs for the extraction tests.

    NOTE(review): `__init__` declares the parameter name `lowerCAmelCase`
    repeatedly (a SyntaxError) — upstream each config value (parent,
    batch_size, min_seq_length, ...) has its own parameter — and the body
    assigns to the throwaway `SCREAMING_SNAKE_CASE__` instead of the
    `self.*` attributes read below. Compare with the upstream tester class.
    """

    def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=10 , lowerCAmelCase=160 , lowerCAmelCase=8 , lowerCAmelCase=0.0 , lowerCAmelCase=4000 , lowerCAmelCase=False , lowerCAmelCase=True , ) -> List[str]:
        SCREAMING_SNAKE_CASE__: List[Any]= parent
        SCREAMING_SNAKE_CASE__: Optional[Any]= batch_size
        SCREAMING_SNAKE_CASE__: Union[str, Any]= min_seq_length
        SCREAMING_SNAKE_CASE__: List[str]= max_seq_length
        SCREAMING_SNAKE_CASE__: Optional[int]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        SCREAMING_SNAKE_CASE__: str= padding_value
        SCREAMING_SNAKE_CASE__: List[Any]= sampling_rate
        SCREAMING_SNAKE_CASE__: Dict= return_attention_mask
        SCREAMING_SNAKE_CASE__: int= do_normalize
        SCREAMING_SNAKE_CASE__: List[Any]= feature_size
        SCREAMING_SNAKE_CASE__: List[Any]= chunk_length
        SCREAMING_SNAKE_CASE__: List[Any]= hop_length

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Kwargs dict to construct the feature extractor under test.
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Union[str, Any]:
        # Generate synthetic speech inputs; equal_length makes every sample
        # max_seq_length long, otherwise lengths increase across the batch.
        def _flatten(lowerCAmelCase ):
            return list(itertools.chain(*lowerCAmelCase ) )

        if equal_length:
            SCREAMING_SNAKE_CASE__: Dict= [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            SCREAMING_SNAKE_CASE__: Any= [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            SCREAMING_SNAKE_CASE__: List[str]= [np.asarray(lowerCAmelCase ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """Test suite for WhisperFeatureExtractor: save/load round trips, call
    paths, padding dtype handling and an integration check.

    NOTE(review): machine-mangled — most assignments target a throwaway name
    while later lines read the intended identifiers (``feat_extract_first``,
    ``feature_extractor``, …); restore real names before relying on it.
    """

    # feature-extraction class under test (None when speech extras are missing)
    __a = WhisperFeatureExtractor if is_speech_available() else None

    def UpperCamelCase_ ( self ) -> Tuple:
        # setUp-style hook: build the helper/config object
        SCREAMING_SNAKE_CASE__: Any= WhisperFeatureExtractionTester(self )

    def UpperCamelCase_ ( self ) -> int:
        # save_pretrained -> from_pretrained round trip preserves config + mel filters
        SCREAMING_SNAKE_CASE__: List[Any]= self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            SCREAMING_SNAKE_CASE__: List[Any]= feat_extract_first.save_pretrained(lowerCAmelCase )[0]
            check_json_file_has_correct_format(lowerCAmelCase )
            SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class.from_pretrained(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: str= feat_extract_first.to_dict()
        SCREAMING_SNAKE_CASE__: List[str]= feat_extract_second.to_dict()
        SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract_first.mel_filters
        SCREAMING_SNAKE_CASE__: Any= feat_extract_second.mel_filters
        self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
        self.assertEqual(lowerCAmelCase , lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> List[Any]:
        # to_json_file -> from_json_file round trip preserves config + mel filters
        SCREAMING_SNAKE_CASE__: Any= self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            SCREAMING_SNAKE_CASE__: Any= os.path.join(lowerCAmelCase , '''feat_extract.json''' )
            feat_extract_first.to_json_file(lowerCAmelCase )
            SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class.from_json_file(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= feat_extract_first.to_dict()
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract_second.to_dict()
        SCREAMING_SNAKE_CASE__: List[str]= feat_extract_first.mel_filters
        SCREAMING_SNAKE_CASE__: Dict= feat_extract_second.mel_filters
        self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) )
        self.assertEqual(lowerCAmelCase , lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> int:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        SCREAMING_SNAKE_CASE__: int= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        SCREAMING_SNAKE_CASE__: Union[str, Any]= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        SCREAMING_SNAKE_CASE__: Tuple= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
        # Test feature size
        SCREAMING_SNAKE_CASE__: List[str]= feature_extractor(lowerCAmelCase , padding='''max_length''' , return_tensors='''np''' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: str= feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
        SCREAMING_SNAKE_CASE__: str= feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
        self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test batched
        SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
        SCREAMING_SNAKE_CASE__: int= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        SCREAMING_SNAKE_CASE__: Tuple= [floats_list((1, x) )[0] for x in (800, 800, 800)]
        SCREAMING_SNAKE_CASE__: str= np.asarray(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
        SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test truncation required
        SCREAMING_SNAKE_CASE__: List[Any]= [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
        SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
        SCREAMING_SNAKE_CASE__: Any= [x[: feature_extractor.n_samples] for x in speech_inputs]
        SCREAMING_SNAKE_CASE__: Tuple= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs_truncated]
        SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
        SCREAMING_SNAKE_CASE__: int= feature_extractor(lowerCAmelCase , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )

    def UpperCamelCase_ ( self ) -> Dict:
        # Padding should keep float64 inputs float64 for both np and pt tensors
        import torch

        SCREAMING_SNAKE_CASE__: List[str]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        SCREAMING_SNAKE_CASE__: Tuple= np.random.rand(100 , 32 ).astype(np.floataa )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> str:
        # Load the first num_samples speech arrays from the dummy LibriSpeech set
        SCREAMING_SNAKE_CASE__: Tuple= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        SCREAMING_SNAKE_CASE__: Any= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]

    def UpperCamelCase_ ( self ) -> Dict:
        # Integration check against hard-coded reference log-mel values
        # fmt: off
        SCREAMING_SNAKE_CASE__: List[Any]= torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ] )
        # fmt: on
        SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 )
        SCREAMING_SNAKE_CASE__: List[str]= WhisperFeatureExtractor()
        SCREAMING_SNAKE_CASE__: Optional[int]= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )

    def UpperCamelCase_ ( self ) -> Any:
        # zero_mean_unit_var_norm should produce ~0 mean and ~unit variance
        SCREAMING_SNAKE_CASE__: Tuple= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        SCREAMING_SNAKE_CASE__: Tuple= self._load_datasamples(1 )[0]
        SCREAMING_SNAKE_CASE__: Optional[Any]= ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
        SCREAMING_SNAKE_CASE__: Dict= feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCAmelCase )[0]
        self.assertTrue(np.all(np.mean(lowerCAmelCase ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase ) - 1 ) < 1e-3 ) )
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
    """Configuration holder for PoolFormer image-processor tests."""

    # NOTE(review): machine-mangled — every ``__init__`` parameter shares the
    # name ``lowerCAmelCase`` (a SyntaxError) and assignments target a
    # throwaway local rather than the ``self.*`` attribute read later.
    def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0.9 , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , ) -> str:
        SCREAMING_SNAKE_CASE__: List[str]= size if size is not None else {'''shortest_edge''': 30}
        SCREAMING_SNAKE_CASE__: Any= crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        SCREAMING_SNAKE_CASE__: Dict= parent
        SCREAMING_SNAKE_CASE__: List[str]= batch_size
        SCREAMING_SNAKE_CASE__: int= num_channels
        SCREAMING_SNAKE_CASE__: int= min_resolution
        SCREAMING_SNAKE_CASE__: List[Any]= max_resolution
        SCREAMING_SNAKE_CASE__: List[str]= do_resize_and_center_crop
        SCREAMING_SNAKE_CASE__: Union[str, Any]= size
        SCREAMING_SNAKE_CASE__: Dict= crop_pct
        SCREAMING_SNAKE_CASE__: Optional[int]= crop_size
        SCREAMING_SNAKE_CASE__: Dict= do_normalize
        SCREAMING_SNAKE_CASE__: List[str]= image_mean
        SCREAMING_SNAKE_CASE__: Union[str, Any]= image_std

    def UpperCamelCase_ ( self ) -> Tuple:
        # kwargs dict used to construct the PoolFormerImageProcessor under test
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """Tests for PoolFormerImageProcessor: attribute presence, from_dict
    overrides, and PIL / numpy / torch call paths.

    NOTE(review): machine-mangled — assignments target a throwaway name while
    later lines read the intended identifiers (``image_processor``,
    ``image_inputs``, ``encoded_images``); restore real names before use.
    """

    # image-processor class under test (None when vision extras are missing)
    __a = PoolFormerImageProcessor if is_vision_available() else None

    def UpperCamelCase_ ( self ) -> List[Any]:
        # setUp-style hook: build the helper/config object
        SCREAMING_SNAKE_CASE__: Any= PoolFormerImageProcessingTester(self )

    @property
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # kwargs for constructing the processor under test
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_ ( self ) -> Dict:
        # the processor exposes the expected configuration attributes
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase , '''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''crop_pct''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )

    def UpperCamelCase_ ( self ) -> Tuple:
        # from_dict honours defaults and explicit size/crop_size overrides
        SCREAMING_SNAKE_CASE__: Any= self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
        self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
        SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )

    def UpperCamelCase_ ( self ) -> Tuple:
        # intentionally empty (placeholder overriding a mixin test)
        pass

    def UpperCamelCase_ ( self ) -> Optional[int]:
        # PIL input path: single image and batch produce the expected shapes
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Dict= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCamelCase_ ( self ) -> Dict:
        # numpy input path: same shape expectations as the PIL path
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: List[Any]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCamelCase_ ( self ) -> int:
        # torch input path: same shape expectations as the PIL path
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: List[Any]= self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE__: Any= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 64 | 1 |
import argparse
import struct
import unittest
class _lowerCamelCase :
    """Pure-Python SHA-256: preprocess ``data``, then compute the hex digest.

    NOTE(review): machine-mangled — locals are bound to a throwaway name where
    attributes such as ``self.hashes`` / ``self.preprocessed_data`` /
    ``self.blocks`` are read later; restore real names before relying on it.
    """

    def __init__( self , lowerCAmelCase ) -> None:
        # raw input bytes to hash (presumably — binding target is mangled)
        SCREAMING_SNAKE_CASE__: Union[str, Any]= data
        # Initialize hash values
        SCREAMING_SNAKE_CASE__: Optional[Any]= [
            0X6_a_0_9_e_6_6_7,
            0Xb_b_6_7_a_e_8_5,
            0X3_c_6_e_f_3_7_2,
            0Xa_5_4_f_f_5_3_a,
            0X5_1_0_e_5_2_7_f,
            0X9_b_0_5_6_8_8_c,
            0X1_f_8_3_d_9_a_b,
            0X5_b_e_0_c_d_1_9,
        ]
        # Initialize round constants
        SCREAMING_SNAKE_CASE__: List[str]= [
            0X4_2_8_a_2_f_9_8,
            0X7_1_3_7_4_4_9_1,
            0Xb_5_c_0_f_b_c_f,
            0Xe_9_b_5_d_b_a_5,
            0X3_9_5_6_c_2_5_b,
            0X5_9_f_1_1_1_f_1,
            0X9_2_3_f_8_2_a_4,
            0Xa_b_1_c_5_e_d_5,
            0Xd_8_0_7_a_a_9_8,
            0X1_2_8_3_5_b_0_1,
            0X2_4_3_1_8_5_b_e,
            0X5_5_0_c_7_d_c_3,
            0X7_2_b_e_5_d_7_4,
            0X8_0_d_e_b_1_f_e,
            0X9_b_d_c_0_6_a_7,
            0Xc_1_9_b_f_1_7_4,
            0Xe_4_9_b_6_9_c_1,
            0Xe_f_b_e_4_7_8_6,
            0X0_f_c_1_9_d_c_6,
            0X2_4_0_c_a_1_c_c,
            0X2_d_e_9_2_c_6_f,
            0X4_a_7_4_8_4_a_a,
            0X5_c_b_0_a_9_d_c,
            0X7_6_f_9_8_8_d_a,
            0X9_8_3_e_5_1_5_2,
            0Xa_8_3_1_c_6_6_d,
            0Xb_0_0_3_2_7_c_8,
            0Xb_f_5_9_7_f_c_7,
            0Xc_6_e_0_0_b_f_3,
            0Xd_5_a_7_9_1_4_7,
            0X0_6_c_a_6_3_5_1,
            0X1_4_2_9_2_9_6_7,
            0X2_7_b_7_0_a_8_5,
            0X2_e_1_b_2_1_3_8,
            0X4_d_2_c_6_d_f_c,
            0X5_3_3_8_0_d_1_3,
            0X6_5_0_a_7_3_5_4,
            0X7_6_6_a_0_a_b_b,
            0X8_1_c_2_c_9_2_e,
            0X9_2_7_2_2_c_8_5,
            0Xa_2_b_f_e_8_a_1,
            0Xa_8_1_a_6_6_4_b,
            0Xc_2_4_b_8_b_7_0,
            0Xc_7_6_c_5_1_a_3,
            0Xd_1_9_2_e_8_1_9,
            0Xd_6_9_9_0_6_2_4,
            0Xf_4_0_e_3_5_8_5,
            0X1_0_6_a_a_0_7_0,
            0X1_9_a_4_c_1_1_6,
            0X1_e_3_7_6_c_0_8,
            0X2_7_4_8_7_7_4_c,
            0X3_4_b_0_b_c_b_5,
            0X3_9_1_c_0_c_b_3,
            0X4_e_d_8_a_a_4_a,
            0X5_b_9_c_c_a_4_f,
            0X6_8_2_e_6_f_f_3,
            0X7_4_8_f_8_2_e_e,
            0X7_8_a_5_6_3_6_f,
            0X8_4_c_8_7_8_1_4,
            0X8_c_c_7_0_2_0_8,
            0X9_0_b_e_f_f_f_a,
            0Xa_4_5_0_6_c_e_b,
            0Xb_e_f_9_a_3_f_7,
            0Xc_6_7_1_7_8_f_2,
        ]
        SCREAMING_SNAKE_CASE__: Any= self.preprocessing(self.data )
        self.final_hash()

    @staticmethod
    def UpperCamelCase_ ( lowerCAmelCase ) -> bytes:
        # Pad the message to a 64-byte boundary: 0x80 marker, zero fill, then
        # the original bit length as an 8-byte big-endian integer.
        SCREAMING_SNAKE_CASE__: Optional[int]= B'''\x80''' + (B'''\x00''' * (63 - (len(lowerCAmelCase ) + 8) % 64))
        SCREAMING_SNAKE_CASE__: List[Any]= struct.pack('''>Q''' , (len(lowerCAmelCase ) * 8) )
        return data + padding + big_endian_integer

    def UpperCamelCase_ ( self ) -> None:
        # Main compression loop over 64-byte blocks; produces the hex digest.
        # Convert into blocks of 64 bytes
        SCREAMING_SNAKE_CASE__: Dict= [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data ) , 64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            SCREAMING_SNAKE_CASE__: Any= list(struct.unpack('''>16L''' , lowerCAmelCase ) )
            # add 48 0-ed integers
            words += [0] * 48
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= self.hashes
            for index in range(0 , 64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    # (message-schedule expansion)
                    SCREAMING_SNAKE_CASE__: Union[str, Any]= (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    SCREAMING_SNAKE_CASE__: List[Any]= (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    SCREAMING_SNAKE_CASE__: Union[str, Any]= (
                        words[index - 16] + sa + words[index - 7] + sa
                    ) % 0X1_0_0_0_0_0_0_0_0
                # Compression
                SCREAMING_SNAKE_CASE__: str= self.ror(lowerCAmelCase , 6 ) ^ self.ror(lowerCAmelCase , 11 ) ^ self.ror(lowerCAmelCase , 25 )
                SCREAMING_SNAKE_CASE__: List[str]= (e & f) ^ ((~e & 0Xf_f_f_f_f_f_f_f) & g)
                SCREAMING_SNAKE_CASE__: List[str]= (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0X1_0_0_0_0_0_0_0_0
                SCREAMING_SNAKE_CASE__: Optional[int]= self.ror(lowerCAmelCase , 2 ) ^ self.ror(lowerCAmelCase , 13 ) ^ self.ror(lowerCAmelCase , 22 )
                SCREAMING_SNAKE_CASE__: Tuple= (a & b) ^ (a & c) ^ (b & c)
                SCREAMING_SNAKE_CASE__: List[Any]= (sa + maj) % 0X1_0_0_0_0_0_0_0_0
                # rotate the eight working variables for the next round
                SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: int= (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
                    c,
                    b,
                    a,
                    ((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
                )
            SCREAMING_SNAKE_CASE__: List[str]= [a, b, c, d, e, f, g, h]
            # Modify final values
            SCREAMING_SNAKE_CASE__: Optional[Any]= [
                ((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
                for index, element in enumerate(self.hashes )
            ]
        SCREAMING_SNAKE_CASE__: str= ''''''.join([hex(lowerCAmelCase )[2:].zfill(8 ) for value in self.hashes] )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int:
        # 32-bit right-rotate of ``value`` by ``rotations`` bits
        return 0Xf_f_f_f_f_f_f_f & (value << (32 - rotations)) | (value >> rotations)
class _lowerCamelCase ( unittest.TestCase ):
    """Cross-check the pure-Python SHA-256 against hashlib."""

    def UpperCamelCase_ ( self ) -> None:
        # NOTE(review): mangled — ``SHAaaa``/``hashlib.shaaaa``/``lowerCAmelCase``
        # are undefined here; presumably SHA256 vs hashlib.sha256 originally.
        import hashlib

        SCREAMING_SNAKE_CASE__: Tuple= bytes('''Test String''' , '''utf-8''' )
        self.assertEqual(SHAaaa(lowerCAmelCase ).hash , hashlib.shaaaa(lowerCAmelCase ).hexdigest() )
def A__ ( ):
    """CLI entry point: SHA-256-hash a string (default) or a file's bytes.

    NOTE(review): mangled — parser/args/hash-input are bound to a throwaway
    local but read as ``parser``/``args``/``snake_case_`` below, and
    ``SHAaaa`` is not defined in this file.
    """
    import doctest

    doctest.testmod()
    SCREAMING_SNAKE_CASE__: Tuple= argparse.ArgumentParser()
    parser.add_argument(
        '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
    parser.add_argument(
        '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
    SCREAMING_SNAKE_CASE__: int= parser.parse_args()
    SCREAMING_SNAKE_CASE__: str= args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , '''rb''' ) as f:
            SCREAMING_SNAKE_CASE__: Tuple= f.read()
    else:
        SCREAMING_SNAKE_CASE__: Optional[int]= bytes(snake_case_ , '''utf-8''' )
    print(SHAaaa(snake_case_ ).hash )


if __name__ == "__main__":
    main()
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ : Tuple = 3
def A__ ( snake_case_ : int ):
    """Search for a primitive-root candidate g modulo the prime ``snake_case_``.

    Defect fixed: the mangled original bound the random candidate to a
    throwaway local, then tested the prime against itself and returned the
    undefined name ``g``. The candidate is now generated, tested, and returned.
    """
    print('''Generating primitive root of p''' )
    while True:
        # candidate generator in [3, p)
        g = random.randrange(3 , snake_case_ )
        # reject candidates whose square is 1 mod p (order 2, not primitive)
        if pow(g , 2 , snake_case_ ) == 1:
            continue
        if pow(g , snake_case_ , snake_case_ ) == 1:
            continue
        return g
def A__ ( snake_case_ : int ):
    """Generate an ElGamal (public_key, private_key) pair of ``snake_case_`` bits.

    NOTE(review): mangled — every intermediate (p, g, private exponent,
    inverse) is bound to a throwaway local, and the tuples below read names
    (``e_a``, ``p``, ``d``, ``key_size``) that are never bound here.
    """
    print('''Generating prime p...''' )
    SCREAMING_SNAKE_CASE__: List[Any]= rabin_miller.generate_large_prime(snake_case_ ) # select large prime number.
    SCREAMING_SNAKE_CASE__: int= primitive_root(snake_case_ ) # one primitive root on modulo p.
    SCREAMING_SNAKE_CASE__: int= random.randrange(3 , snake_case_ ) # private_key -> have to be greater than 2 for safety.
    SCREAMING_SNAKE_CASE__: str= cryptomath.find_mod_inverse(pow(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ )
    SCREAMING_SNAKE_CASE__: int= (key_size, e_a, e_a, p)
    SCREAMING_SNAKE_CASE__: Union[str, Any]= (key_size, d)
    return public_key, private_key
def A__ ( snake_case_ : str , snake_case_ : int ):
    """Write ``<name>_pubkey.txt`` and ``<name>_privkey.txt`` key files.

    Aborts (sys.exit) if either file already exists, to avoid clobbering keys.

    NOTE(review): mangled — both parameters share the name ``snake_case_``
    (a SyntaxError), ``name`` is read but never bound, and the key pair is
    unpacked into throwaway locals while ``public_key``/``private_key`` are
    read below.
    """
    if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
        print('''\nWARNING:''' )
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= generate_key(snake_case_ )
    print(F'\nWriting public key to file {name}_pubkey.txt...' )
    with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
        fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
    print(F'Writing private key to file {name}_privkey.txt...' )
    with open(F'{name}_privkey.txt' , '''w''' ) as fo:
        fo.write(F'{private_key[0]},{private_key[1]}' )
def A__ ( ):
    """Generate the ElGamal key-pair files and report progress."""
    start_message, done_message = '''Making key files...''', '''Key files generation successful'''
    print(start_message)
    make_key_files('''elgamal''' , 2_048 )
    print(done_message)


if __name__ == "__main__":
    main()
| 64 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A__ ( snake_case_ : Dataset , snake_case_ : Dict[str, str] ):
    """Compute WER/CER over a result dataset and write metrics (plus optional
    prediction/target logs) to text files.

    NOTE(review): mangled — both parameters share the name ``snake_case_``
    (a SyntaxError) and every binding targets a throwaway local while
    ``args``/``result``/``wer``/``cer`` etc. are read below.
    """
    SCREAMING_SNAKE_CASE__: List[Any]= args.log_outputs
    SCREAMING_SNAKE_CASE__: Optional[int]= '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    SCREAMING_SNAKE_CASE__: Dict= load_metric('''wer''' )
    SCREAMING_SNAKE_CASE__: Optional[int]= load_metric('''cer''' )
    # compute metrics
    SCREAMING_SNAKE_CASE__: Optional[Any]= wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    SCREAMING_SNAKE_CASE__: List[str]= cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    SCREAMING_SNAKE_CASE__: Tuple= F'WER: {wer_result}\nCER: {cer_result}'
    print(snake_case_ )
    with open(F'{dataset_id}_eval_results.txt' , '''w''' ) as f:
        f.write(snake_case_ )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        SCREAMING_SNAKE_CASE__: Tuple= F'log_{dataset_id}_predictions.txt'
        SCREAMING_SNAKE_CASE__: Any= F'log_{dataset_id}_targets.txt'
        with open(snake_case_ , '''w''' ) as p, open(snake_case_ , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ):
                p.write(F'{i}' + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(F'{i}' + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )

            result.map(snake_case_ , with_indices=snake_case_ )
def A__ ( snake_case_ : str ):
    """Normalize transcript text: lower-case, strip the training-time ignore
    characters, and collapse newline/space artefacts.

    Defect fixed: the mangled original bound every intermediate to a
    throwaway local and then read the undefined name ``text``; the
    transformation pipeline is restored with the same literals.
    """
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , snake_case_.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', ''' ''', ''' ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
def A__ ( snake_case_ : Tuple ):
    """Run ASR evaluation: load a dataset, resample its audio to the model's
    rate, run the ASR pipeline over every example, and log WER/CER.

    NOTE(review): mangled — bindings target throwaway locals while ``args``,
    ``feature_extractor``, ``asr`` etc. are read below; restore real names.
    """
    # load dataset
    SCREAMING_SNAKE_CASE__: Union[str, Any]= load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    SCREAMING_SNAKE_CASE__: Dict= AutoFeatureExtractor.from_pretrained(args.model_id )
    SCREAMING_SNAKE_CASE__: Dict= feature_extractor.sampling_rate
    # resample audio
    SCREAMING_SNAKE_CASE__: Tuple= dataset.cast_column('''audio''' , Audio(sampling_rate=snake_case_ ) )
    # load eval pipeline
    if args.device is None:
        # default to the first GPU when available, else CPU
        SCREAMING_SNAKE_CASE__: Tuple= 0 if torch.cuda.is_available() else -1
    SCREAMING_SNAKE_CASE__: int= pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(snake_case_ : Dict ):
        SCREAMING_SNAKE_CASE__: Optional[Any]= asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        SCREAMING_SNAKE_CASE__: int= prediction['''text''']
        SCREAMING_SNAKE_CASE__: Dict= normalize_text(batch['''sentence'''] )
        return batch

    # run inference on all examples
    SCREAMING_SNAKE_CASE__: Tuple= dataset.map(snake_case_ , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(snake_case_ , snake_case_ )
if __name__ == "__main__":
    # CLI argument definitions for the evaluation script above.
    # NOTE(review): mangled — the parser and parsed args are bound to
    # ``lowercase_`` but read as ``parser``/``args`` below.
    lowercase_ : int = argparse.ArgumentParser()
    parser.add_argument(
        '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
    )
    parser.add_argument(
        '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
    )
    parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
    parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
    )
    parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
    )
    parser.add_argument(
        '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
    )
    parser.add_argument(
        '--device',
        type=int,
        default=None,
        help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
    )
    lowercase_ : Optional[int] = parser.parse_args()
    main(args)
from math import factorial
def A__ ( n : int , k : int ):
    """Return C(n, k): the number of ways to choose ``k`` items from ``n``.

    Defect fixed: the original declared both parameters with the same name
    ``snake_case_`` (a SyntaxError) and read the undefined names ``n``/``k``.

    Raises:
        ValueError: if ``k > n`` or ``k < 0``.
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
    # Worked examples for the n-choose-k function.
    # NOTE(review): ``combinations`` is not defined in this file (the function
    # above is named ``A__``) — presumably the same function pre-mangling.
    print(
        'The number of five-card hands possible from a standard',
        f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
    )
    print(
        'If a class of 40 students must be arranged into groups of',
        f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
        'to arrange them.\n',
    )
    print(
        'If 10 teams are competing in a Formula One race, there',
        f'''are {combinations(1_0, 3)} ways that first, second and''',
        'third place can be awarded.',
    )
| 64 | 1 |
from __future__ import annotations
from PIL import Image
# Define glider example
lowercase_ : Tuple = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowercase_ : int = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def A__ ( snake_case_ : list[list[int]] ):
    """Compute the next Conway's Game of Life generation for a 2-D 0/1 grid.

    Defect fixed: the mangled original appended rows/cells to the undefined
    names ``next_generation_row``/``next_generation`` instead of the
    accumulators it created; the accumulators are restored.

    Args:
        snake_case_: rectangular grid of 0 (dead) / 1 (alive) cells.

    Returns:
        A new grid of the same shape holding the next generation.
    """
    cells = snake_case_
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def A__ ( snake_case_ : list[list[int]] , snake_case_ : int ):
    """Render successive Game of Life generations as greyscale PIL images.

    NOTE(review): mangled — both parameters share the name ``snake_case_``
    (a SyntaxError) and bindings target throwaway locals while ``cells``,
    ``img``, ``images`` and ``colour`` are read below.
    """
    SCREAMING_SNAKE_CASE__: str= []
    for _ in range(snake_case_ ):
        # Create output image
        SCREAMING_SNAKE_CASE__: Any= Image.new('''RGB''' , (len(cells[0] ), len(snake_case_ )) )
        SCREAMING_SNAKE_CASE__: List[str]= img.load()
        # Save cells to image
        for x in range(len(snake_case_ ) ):
            for y in range(len(cells[0] ) ):
                # live cells are black (0), dead cells white (255)
                SCREAMING_SNAKE_CASE__: Optional[Any]= 255 - cells[y][x] * 255
                SCREAMING_SNAKE_CASE__: Union[str, Any]= (colour, colour, colour)
        # Save image
        images.append(snake_case_ )
        SCREAMING_SNAKE_CASE__: int= new_generation(snake_case_ )
    return images
if __name__ == "__main__":
    # Demo: render 16 generations of the glider and save an animated GIF.
    # NOTE(review): mangled — the frames are bound to ``lowercase_`` but read
    # as ``images`` on the next line.
    lowercase_ : str = generate_images(GLIDER, 1_6)
    images[0].save('out.gif', save_all=True, append_images=images[1:])
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase_ : Dict = random.Random()
if is_torch_available():
import torch
def A__ ( shape , scale=1.0 , rng=None , name=None ):
    """Create a ``shape[0] x shape[1]`` nested list of random floats in [0, scale).

    Defects fixed: the original declared all four parameters with the same
    name ``snake_case_`` (a SyntaxError) and bound every value to a throwaway
    local while later lines read ``rng`` and ``values`` (NameError).

    Args:
        shape: 2-tuple ``(rows, cols)`` of the output grid.
        scale: multiplier applied to each random sample.
        rng: optional ``random.Random``; falls back to the module-level RNG.
        name: accepted for call-site compatibility, unused.
    """
    if rng is None:
        # fall back to the shared module-level Random instance for reproducibility
        rng = lowercase_
    values = []
    for _batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class _lowerCamelCase ( unittest.TestCase ):
    """Configuration holder + input builder for AST feature-extraction tests."""

    # NOTE(review): machine-mangled — duplicate ``lowerCAmelCase`` parameters
    # (a SyntaxError) and throwaway assignment targets where the ``self.*``
    # attributes are read later; restore real names before relying on it.
    def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=1 , lowerCAmelCase=0.0 , lowerCAmelCase=16000 , lowerCAmelCase=True , lowerCAmelCase=True , ) -> List[str]:
        SCREAMING_SNAKE_CASE__: Optional[Any]= parent
        SCREAMING_SNAKE_CASE__: Dict= batch_size
        SCREAMING_SNAKE_CASE__: Optional[int]= min_seq_length
        SCREAMING_SNAKE_CASE__: Dict= max_seq_length
        # step between consecutive (increasing) sequence lengths within a batch
        SCREAMING_SNAKE_CASE__: Optional[Any]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        SCREAMING_SNAKE_CASE__: Dict= feature_size
        SCREAMING_SNAKE_CASE__: str= padding_value
        SCREAMING_SNAKE_CASE__: Dict= sampling_rate
        SCREAMING_SNAKE_CASE__: List[str]= return_attention_mask
        SCREAMING_SNAKE_CASE__: str= do_normalize

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # kwargs dict used to construct the ASTFeatureExtractor under test
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Dict:
        # Build a batch of float "audio" inputs (equal-length or increasing)
        def _flatten(lowerCAmelCase ):
            return list(itertools.chain(*lowerCAmelCase ) )

        if equal_length:
            SCREAMING_SNAKE_CASE__: int= floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            SCREAMING_SNAKE_CASE__: int= [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """Unit tests for ``ASTFeatureExtractor`` (batched vs unbatched calls,
    tensor dtypes, and a regression check against precomputed values).

    NOTE(review): parameter names in this dump are mangled (``lowerCAmelCase``
    repeated) and bodies read un-mangled names; see the tester class above.
    """
    # Feature-extractor class exercised by the shared mixin.
    __a = ASTFeatureExtractor
    def UpperCamelCase_ ( self ) -> int:
        SCREAMING_SNAKE_CASE__: List[Any]= ASTFeatureExtractionTester(self )
    def UpperCamelCase_ ( self ) -> Any:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        SCREAMING_SNAKE_CASE__: Optional[int]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        SCREAMING_SNAKE_CASE__: Dict= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test batched
        SCREAMING_SNAKE_CASE__: Tuple= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
        SCREAMING_SNAKE_CASE__: List[Any]= np.asarray(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
    @require_torch
    def UpperCamelCase_ ( self ) -> Dict:
        # Padding must return float32 values for both numpy and torch tensors.
        import torch
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        SCREAMING_SNAKE_CASE__: List[str]= np.random.rand(100 ).astype(np.floataa )
        SCREAMING_SNAKE_CASE__: Optional[Any]= np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
        # Load ``num_samples`` decoded audio arrays from the dummy dataset.
        from datasets import load_dataset
        SCREAMING_SNAKE_CASE__: Optional[int]= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        SCREAMING_SNAKE_CASE__: Dict= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    @require_torch
    def UpperCamelCase_ ( self ) -> str:
        # Regression test: output must match precomputed values on real audio.
        # fmt: off
        SCREAMING_SNAKE_CASE__: str= torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
            -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
            -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
            -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 )
        SCREAMING_SNAKE_CASE__: Tuple= ASTFeatureExtractor()
        SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
| 64 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
    """Configuration/fixture helper for the LayoutLMv3 image-processor tests.

    NOTE(review): constructor parameters were mangled to one repeated name
    (``lowerCAmelCase`` -- a SyntaxError as written) while the body reads
    ``size``/``parent``/``batch_size``/...; restore the original list
    (parent, batch_size, num_channels, image_size, min_resolution,
    max_resolution, do_resize, size, apply_ocr) before running.
    """
    def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=18 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=True , ) -> List[Any]:
        # Default target size when the caller does not supply one.
        SCREAMING_SNAKE_CASE__: str= size if size is not None else {'''height''': 18, '''width''': 18}
        SCREAMING_SNAKE_CASE__: Optional[int]= parent
        SCREAMING_SNAKE_CASE__: str= batch_size
        SCREAMING_SNAKE_CASE__: Optional[Any]= num_channels
        SCREAMING_SNAKE_CASE__: List[Any]= image_size
        SCREAMING_SNAKE_CASE__: Any= min_resolution
        SCREAMING_SNAKE_CASE__: Tuple= max_resolution
        SCREAMING_SNAKE_CASE__: str= do_resize
        SCREAMING_SNAKE_CASE__: Dict= size
        SCREAMING_SNAKE_CASE__: List[str]= apply_ocr
    def UpperCamelCase_ ( self ) -> Dict:
        # Kwargs dict used to construct the image processor under test.
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """Unit tests for ``LayoutLMvaImageProcessor``: property presence,
    size overrides, PIL/numpy/torch input handling, and an OCR integration
    test against precomputed Tesseract 4.1.1 output.
    """
    # Class under test; None when pytesseract is unavailable (tests skip).
    __a = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def UpperCamelCase_ ( self ) -> Dict:
        SCREAMING_SNAKE_CASE__: List[str]= LayoutLMvaImageProcessingTester(self )
    @property
    def UpperCamelCase_ ( self ) -> Dict:
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCamelCase_ ( self ) -> Tuple:
        # The processor must expose its configuration attributes.
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''apply_ocr''' ) )
    def UpperCamelCase_ ( self ) -> Dict:
        # ``from_dict`` honours both the stored size and an explicit override.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def UpperCamelCase_ ( self ) -> str:
        pass
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # PIL inputs: single image and batch must produce 4-D pixel_values,
        # and OCR words/boxes are returned as lists.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE__: Tuple= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[Any]= image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , lowerCAmelCase )
        self.assertIsInstance(encoding.boxes , lowerCAmelCase )
        # Test batched
        SCREAMING_SNAKE_CASE__: Optional[Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def UpperCamelCase_ ( self ) -> Dict:
        # Same shape checks for numpy-array inputs.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: int= self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE__: int= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: str= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Same shape checks for torch-tensor inputs.
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE__: List[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Any= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def UpperCamelCase_ ( self ) -> Any:
        # Integration test: run real OCR on a fixture document and compare
        # against the exact words/boxes produced by Tesseract 4.1.1; then
        # verify that apply_ocr=False returns pixel values only.
        # with apply_OCR = True
        SCREAMING_SNAKE_CASE__: List[Any]= LayoutLMvaImageProcessor()
        from datasets import load_dataset
        SCREAMING_SNAKE_CASE__: Dict= load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        SCREAMING_SNAKE_CASE__: str= Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        SCREAMING_SNAKE_CASE__: int= image_processing(lowerCAmelCase , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        SCREAMING_SNAKE_CASE__: Union[str, Any]= [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', 
        '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        SCREAMING_SNAKE_CASE__: Dict= [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 
        447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , lowerCAmelCase )
        self.assertListEqual(encoding.boxes , lowerCAmelCase )
        # with apply_OCR = False
        SCREAMING_SNAKE_CASE__: str= LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 64 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Lazy-module wiring for the speech_to_text package: the dicts/lists below
# describe which symbols each optional dependency (sentencepiece, torchaudio,
# TF, torch) contributes; under TYPE_CHECKING the real imports are performed
# for static analysis, otherwise a _LazyModule proxy is installed.
# NOTE(review): the dump rebound every structure to ``lowercase_`` while the
# final _LazyModule call reads ``_import_structure`` -- restore the original
# variable name before running.
lowercase_ : List[Any] = {
    'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
    'processing_speech_to_text': ['Speech2TextProcessor'],
}
# Tokenizer requires sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Optional[Any] = ['Speech2TextTokenizer']
# Feature extractor requires the speech extras (torchaudio).
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Any = ['Speech2TextFeatureExtractor']
# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Optional[int] = [
        'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFSpeech2TextForConditionalGeneration',
        'TFSpeech2TextModel',
        'TFSpeech2TextPreTrainedModel',
    ]
# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Optional[int] = [
        'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Speech2TextForConditionalGeneration',
        'Speech2TextModel',
        'Speech2TextPreTrainedModel',
    ]
# Static-analysis path: perform the real imports so type checkers see them.
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer
    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )
# Runtime path: replace this module with a lazy loader.
else:
    import sys
    lowercase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
    """Deprecated alias for the CLIP image processor.

    Kept only for backward compatibility: constructing it emits a
    ``FutureWarning`` and otherwise behaves exactly like the parent class.

    The obfuscated original declared ``*lowerCAmelCase, **lowerCAmelCase``
    (duplicate argument name, a SyntaxError) and passed the positional-args
    tuple as the warning category; both are fixed here.
    """

    def __init__( self , *args , **kwargs ) -> None:
        # FutureWarning so downstream users migrate before the class is removed.
        warnings.warn(
            '''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use CLIPImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 64 | import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A__ ( ):
    """Parse command-line arguments for the Stable-Diffusion demo script.

    The obfuscated original passed the undefined name ``snake_case_`` as
    ``type``/``default``/``required`` for every option (a NameError at call
    time); the obvious intended values are restored below.

    Returns:
        argparse.Namespace with ``pretrained_model_name_or_path`` (required),
        ``caption``, ``images_num``, ``seed`` and ``cuda_id`` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''' , '''--pretrained_model_name_or_path''' , type=str , default=None , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
    parser.add_argument(
        '''-c''' , '''--caption''' , type=str , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
    parser.add_argument(
        '''-n''' , '''--images_num''' , type=int , default=4 , help='''How much images to generate.''' , )
    parser.add_argument(
        '''-s''' , '''--seed''' , type=int , default=42 , help='''Seed for random process.''' , )
    parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=int , default=0 , help='''cuda_id.''' , )
    args = parser.parse_args()
    return args
def A__ ( imgs , rows , cols ):
    """Paste ``rows * cols`` equally sized PIL images into one grid image.

    The obfuscated original declared three parameters all named
    ``snake_case_`` (a SyntaxError) while the body read ``imgs``/``rows``/
    ``cols``; the intended names are restored, and the unused unpack of
    ``grid.size`` is dropped.

    Args:
        imgs: sequence of PIL images, all the same size as ``imgs[0]``.
        rows: number of grid rows.
        cols: number of grid columns.

    Returns:
        A new ``rows*h`` by ``cols*w`` RGB PIL image.

    Raises:
        ValueError: if ``len(imgs) != rows * cols``.
    """
    if not len(imgs ) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''' )
    w, h = imgs[0].size
    grid = Image.new('''RGB''' , size=(cols * w, rows * h) )
    # Fill the grid row-major: image i goes to column i % cols, row i // cols.
    for i, img in enumerate(imgs ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def A__ ( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    """Run ``pipeline`` on ``prompt`` and tile the results into a near-square grid.

    The obfuscated original declared six parameters all named ``snake_case_``
    (a SyntaxError) while the body read ``pipeline``/``seed``/...; the
    intended names and defaults are restored.

    Returns:
        (grid, images): the pasted grid image and the list of generated images.
    """
    # Seed a generator on the pipeline's device so results are reproducible.
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    # NOTE(review): ``image_grid`` is the helper defined above (renamed to
    # ``A__`` by the dump) -- confirm the call target resolves before running.
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
# Script body: build a Stable-Diffusion pipeline from the given checkpoint,
# optionally swap in an INT8 UNet quantized by Intel Neural Compressor
# (``best_model.pt``), generate images for the caption, and save the grid
# plus individual frames next to the checkpoint.
# NOTE(review): the dump rebound every result to ``lowercase_`` while later
# lines read the original names (``args``, ``tokenizer``, ``pipeline``,
# ``grid``, ``images``, ``dirname``) and call ``parse_args``/
# ``generate_images`` which are renamed ``A__`` above -- restore the original
# bindings before running.
lowercase_ : List[str] = parse_args()
# Load models and create wrapper for stable diffusion
lowercase_ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowercase_ : List[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowercase_ : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowercase_ : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowercase_ : Dict = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker (always reports "not NSFW").
lowercase_ : str = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    # Quantized UNet found: load it with neural_compressor and run on CPU.
    lowercase_ : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    # No quantized model: run the FP32 UNet on the requested CUDA device.
    lowercase_ : Any = unet.to(torch.device('cuda', args.cuda_id))
lowercase_ : str = pipeline.to(unet.device)
lowercase_ , lowercase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the tiled grid, then each frame individually, named after the caption.
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowercase_ : List[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 64 | 1 |
from math import sqrt
def A__ ( snake_case_ : int ):
    """Return the sum of the proper divisors of ``snake_case_``.

    Divisors are collected in complementary pairs ``(d, n // d)`` while
    scanning only up to the square root, so the loop is O(sqrt(n)); the
    number itself is subtracted at the end so only *proper* divisors count.
    """
    root = sqrt(snake_case_ )
    acc = 0
    for d in range(1 , int(root + 1 ) ):
        if d == root:
            # Perfect square: the root pairs with itself, count it once.
            acc += d
        elif snake_case_ % d == 0:
            # d and its cofactor are two distinct divisors -- add both.
            acc += d + snake_case_ // d
    return acc - snake_case_
def A__ ( snake_case_ : int = 10_000 ):
    """Project Euler 21: sum of all amicable numbers below ``snake_case_``.

    ``a`` is amicable when ``d(d(a)) == a`` and ``d(a) != a``, where ``d`` is
    the sum of proper divisors.

    Fixes over the obfuscated original: the comprehension tested the *limit*
    (``snake_case_``) instead of each candidate ``i``, the function returned
    the unbound name ``total``, and it called ``sum_of_divisors`` which this
    dump renamed away -- a private local helper replaces that call.
    """

    def _sum_of_divisors(n: int) -> int:
        # Sum of proper divisors via paired divisors up to sqrt(n).
        root = sqrt(n)
        total = 0
        for d in range(1, int(root + 1)):
            if n % d == 0 and d != root:
                total += d + n // d
            elif d == root:
                total += d
        return total - n

    return sum(
        i
        for i in range(1, snake_case_)
        if _sum_of_divisors(_sum_of_divisors(i)) == i and _sum_of_divisors(i) != i
    )
if __name__ == "__main__":
    # Read an integer limit from stdin and print the amicable-number sum.
    # NOTE(review): ``solution`` is not defined under that name in this dump
    # (the function above was renamed ``A__``) -- confirm before running.
    print(solution(int(str(input()).strip())))
| 64 | from __future__ import annotations
from collections import deque
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: list[dict]= []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCAmelCase )
self.set_fail_transitions()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: str= 0
for character in keyword:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= next_state
self.adlist[current_state]["output"].append(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> None:
SCREAMING_SNAKE_CASE__: deque= deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= 0
while q:
SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
lowerCAmelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
SCREAMING_SNAKE_CASE__: str= (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
for i in range(len(lowerCAmelCase ) ):
while (
self.find_next_state(lowerCAmelCase , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__: List[Any]= 0
else:
SCREAMING_SNAKE_CASE__: Dict= next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__: Optional[Any]= []
result[key].append(i - len(lowerCAmelCase ) + 1 )
return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 64 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
__a = TextToVideoSDPipeline
__a = TEXT_TO_IMAGE_PARAMS
__a = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__a = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: List[str]= UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
SCREAMING_SNAKE_CASE__: Optional[int]= DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: int= AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__: str= CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
SCREAMING_SNAKE_CASE__: Dict= CLIPTextModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE__: Tuple= {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Dict:
if str(lowerCAmelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__: Any= torch.manual_seed(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Any= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: str= '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components()
SCREAMING_SNAKE_CASE__: List[Any]= TextToVideoSDPipeline(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= '''np'''
SCREAMING_SNAKE_CASE__: Optional[int]= sd_pipe(**lowerCAmelCase ).frames
SCREAMING_SNAKE_CASE__: Union[str, Any]= frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__: Any= np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase_ ( self ) -> int:
    # Attention slicing must closely match the regular forward pass.
    # NOTE(review): `lowerCAmelCase` here looks mangled (presumably False) -- confirm.
    self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
    torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
    # xformers memory-efficient attention must closely match default attention.
    self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def UpperCamelCase_ ( self ) -> List[str]:
    # Intentionally skipped -- see the decorator's reason.
    pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def UpperCamelCase_ ( self ) -> Optional[int]:
    # Intentionally skipped -- see the decorator's reason.
    pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def UpperCamelCase_ ( self ) -> Dict:
    # Intentionally skipped -- see the decorator's reason.
    pass
def UpperCamelCase_ ( self ) -> Dict:
    # Delegates unchanged to the mixin's progress-bar test.
    return super().test_progress_bar()
@slow
@skip_mps
class _lowerCamelCase ( unittest.TestCase ):
    # NOTE(review): identifiers in this class look machine-mangled -- `pipe`,
    # `video_frames`, `expected_video` are read but bound via the placeholder
    # assignments above them. Code left byte-identical, only docs added.
    """Slow (CUDA) integration tests for the text-to-video SD pipeline."""

    def UpperCamelCase_ ( self ) -> Dict:
        # 25-step run with the multistep DPM solver, compared against a
        # reference video hosted on the Hub (mean abs difference < 5e-2).
        SCREAMING_SNAKE_CASE__: Optional[int]= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
        SCREAMING_SNAKE_CASE__: Any= TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        SCREAMING_SNAKE_CASE__: int= pipe.to('''cuda''' )
        SCREAMING_SNAKE_CASE__: List[str]= '''Spiderman is surfing'''
        # CPU generator keeps the latents reproducible across GPU models.
        SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Optional[int]= pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=25 , output_type='''pt''' ).frames
        SCREAMING_SNAKE_CASE__: List[str]= video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2

    def UpperCamelCase_ ( self ) -> List[str]:
        # Cheap 2-step run against a matching 2-step reference video.
        SCREAMING_SNAKE_CASE__: List[str]= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
        SCREAMING_SNAKE_CASE__: List[Any]= TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        SCREAMING_SNAKE_CASE__: Any= pipe.to('''cuda''' )
        SCREAMING_SNAKE_CASE__: Optional[Any]= '''Spiderman is surfing'''
        SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Dict= pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type='''pt''' ).frames
        SCREAMING_SNAKE_CASE__: Dict= video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 64 | import numpy as np
def A__(f, ya, xa, h, x_end):
    """Solve the ODE y' = f(x, y) with the classical 4th-order Runge-Kutta method.

    Fixes the mangled original, whose five parameters were all named
    ``snake_case_`` (a SyntaxError) and whose four slope variables had
    collapsed into an undefined ``ka``.

    Args:
        f: right-hand side of the ODE, called as ``f(x, y)``.
        ya: initial value ``y(xa)``.
        xa: start of the integration interval.
        h: step size.
        x_end: end of the integration interval.

    Returns:
        numpy array of length ``n + 1`` holding the approximated y values,
        where ``n = ceil((x_end - xa) / h)``.

    >>> import numpy as np
    >>> # y' = y, y(0) = 1  ->  y(1) ~= e
    >>> abs(A__(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1] - 2.718281828) < 1e-4
    True
    """
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Four slope evaluations of the classical RK4 scheme.
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        # Weighted average of the slopes advances the solution one step.
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Convert a txt2img UnCLIP (Karlo) checkpoint into an image-variation
    # pipeline by reusing its sub-models and adding a CLIP image encoder.
    #
    # Fixes in this revision: the parser/args/pipeline locals were all mangled
    # to `lowercase_` while later lines read `parser`/`args`, and the script
    # accessed `args.txtaimg_unclip` although argparse stores the option
    # `--txt2img_unclip` under the attribute `txt2img_unclip`.
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    # Image-variation needs an image encoder in place of text conditioning.
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 64 | import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _lowerCamelCase ( unittest.TestCase ):
    """Unit tests for diffusers' ``get_activation`` factory.

    Fixes in this revision: the local holding the activation module was
    mangled away (later lines read an undefined ``lowerCAmelCase``/``act``),
    and every tensor used ``torch.floataa`` -- an attribute that does not
    exist on torch; the intended dtype is ``torch.float32``.

    NOTE(review): method names (``UpperCamelCase_``) look machine-mangled and
    are kept as-is to preserve the external interface.
    """

    def UpperCamelCase_ ( self ) -> List[Any]:
        # "swish" maps to torch's SiLU module.
        act = get_activation('''swish''')
        self.assertIsInstance(act, nn.SiLU)
        # silu(-100) underflows to exactly 0; silu(20) rounds to 20 in float32.
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def UpperCamelCase_ ( self ) -> int:
        # "silu" is an alias for the same SiLU module.
        act = get_activation('''silu''')
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # mish(-200) underflows to 0 via softplus; mish(20) rounds to 20.
        act = get_activation('''mish''')
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def UpperCamelCase_ ( self ) -> int:
        # gelu(-100) is 0 to float32 precision; gelu(20) rounds to 20.
        act = get_activation('''gelu''')
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 64 | 1 |
from __future__ import annotations
from math import pow, sqrt
def A__(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Compute the missing quantity of a series AC circuit.

    Exactly one of the three arguments must be 0; that is the quantity to
    solve for, using the impedance triangle Z^2 = R^2 + X^2.

    Fix in this revision: the original signature named all three parameters
    ``snake_case_`` (a duplicate-argument SyntaxError) while the body already
    read ``resistance``/``reactance``/``impedance`` -- the parameters are
    restored to those names.

    Args:
        resistance: R in ohms (pass 0 to solve for it).
        reactance: X in ohms (pass 0 to solve for it).
        impedance: Z in ohms (pass 0 to solve for it).

    Returns:
        A single-entry dict mapping the solved quantity's name to its value.

    Raises:
        ValueError: if the number of zero arguments is not exactly one.

    >>> A__(0, 4, 5)
    {'resistance': 3.0}
    >>> A__(3, 0, 5)
    {'reactance': 4.0}
    >>> A__(3, 4, 0)
    {'impedance': 5.0}
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('''Exactly one argument must be 0''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ : Tuple = TypeVar('T')
class _lowerCamelCase ( Generic[T] ):
def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: Any | T= None
SCREAMING_SNAKE_CASE__: int= len(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: list[T]= [any_type for _ in range(self.N )] + arr
SCREAMING_SNAKE_CASE__: List[Any]= fnc
self.build()
def UpperCamelCase_ ( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
p += self.N
SCREAMING_SNAKE_CASE__: Union[str, Any]= v
while p > 1:
SCREAMING_SNAKE_CASE__: Any= p // 2
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> T | None: # noqa: E741
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= l + self.N, r + self.N
SCREAMING_SNAKE_CASE__: T | None= None
while l <= r:
if l % 2 == 1:
SCREAMING_SNAKE_CASE__: str= self.st[l] if res is None else self.fn(lowerCAmelCase , self.st[l] )
if r % 2 == 0:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.st[r] if res is None else self.fn(lowerCAmelCase , self.st[r] )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
    # Self-test: brute-force every inclusive range of an array with reduce()
    # and compare against min/max/sum segment trees, before and after updates.
    #
    # Fixes in this revision: the mangled original referenced undefined names
    # (`SegmentTree`, `test_all_segments`, `snake_case_` inside reduce, a
    # lambda with duplicate parameters) and bound everything to the same
    # placeholder `lowercase_`; the class defined above is `_lowerCamelCase`.
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    # leaf index -> new value, applied between the two validation passes.
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = _lowerCamelCase(test_array, min)
    max_segment_tree = _lowerCamelCase(test_array, max)
    sum_segment_tree = _lowerCamelCase(test_array, lambda a, b: a + b)

    def A__():
        """Check every inclusive range (i, j) against a reduce() reference."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    A__()

    for index, value in test_updates.items():
        # Keep the brute-force array in sync with the trees.
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)

    A__()
| 64 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
    # NOTE(review): identifiers look machine-mangled -- every method is named
    # `UpperCamelCase_` (later defs shadow earlier ones) and locals are bound
    # via `SCREAMING_SNAKE_CASE__` placeholders while later lines read the
    # original names. Code left byte-identical; only documentation added.
    """Processor round-trip tests for CLIPSegProcessor (tokenizer + ViT image processor)."""

    def UpperCamelCase_ ( self ) -> Tuple:
        # Build a throwaway directory with a tiny BPE vocab/merges and an image
        # processor config so processors can be (de)serialized from disk.
        SCREAMING_SNAKE_CASE__: Tuple= tempfile.mkdtemp()
        # fmt: off
        SCREAMING_SNAKE_CASE__: Optional[Any]= ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        SCREAMING_SNAKE_CASE__: Dict= dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        SCREAMING_SNAKE_CASE__: Optional[int]= {'''unk_token''': '''<unk>'''}
        SCREAMING_SNAKE_CASE__: Any= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        SCREAMING_SNAKE_CASE__: List[str]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(lowerCAmelCase ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(lowerCAmelCase ) )
        SCREAMING_SNAKE_CASE__: Optional[int]= {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        SCREAMING_SNAKE_CASE__: Any= os.path.join(self.tmpdirname , lowerCAmelCase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(lowerCAmelCase , lowerCAmelCase )

    def UpperCamelCase_ ( self , **lowerCAmelCase ) -> Optional[Any]:
        # Fresh slow tokenizer from the temp dir.
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )

    def UpperCamelCase_ ( self , **lowerCAmelCase ) -> Optional[int]:
        # Fresh fast (Rust) tokenizer from the temp dir.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase )

    def UpperCamelCase_ ( self , **lowerCAmelCase ) -> Union[str, Any]:
        # Fresh image processor from the temp dir.
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> str:
        # Clean up the temp directory created above.
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase_ ( self ) -> Dict:
        # One random 30x400 RGB PIL image as pipeline-style input.
        SCREAMING_SNAKE_CASE__: Tuple= [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        SCREAMING_SNAKE_CASE__: Optional[Any]= [Image.fromarray(np.moveaxis(lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCamelCase_ ( self ) -> Tuple:
        # save_pretrained/from_pretrained round-trip with slow and fast tokenizers.
        SCREAMING_SNAKE_CASE__: Any= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_rust_tokenizer()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: Optional[int]= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
        processor_slow.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE__: str= CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
        processor_fast.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE__: Dict= CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase )
        self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase )
        self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> str:
        # Round-trip with extra kwargs (new special tokens, processor options).
        SCREAMING_SNAKE_CASE__: Tuple= CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        SCREAMING_SNAKE_CASE__: Dict= self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        SCREAMING_SNAKE_CASE__: List[str]= self.get_image_processor(do_normalize=lowerCAmelCase , padding_value=1.0 )
        SCREAMING_SNAKE_CASE__: Optional[int]= CLIPSegProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowerCAmelCase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> Dict:
        # Processor image output must match the bare image processor output.
        SCREAMING_SNAKE_CASE__: Any= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: Any= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: Optional[int]= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE__: Any= image_processor(lowerCAmelCase , return_tensors='''np''' )
        SCREAMING_SNAKE_CASE__: str= processor(images=lowerCAmelCase , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def UpperCamelCase_ ( self ) -> Any:
        # Processor text output must match the bare tokenizer output.
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: int= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= '''lower newer'''
        SCREAMING_SNAKE_CASE__: List[Any]= processor(text=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Tuple= tokenizer(lowerCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def UpperCamelCase_ ( self ) -> List[Any]:
        # Text + images together yields token ids, mask and pixel values.
        SCREAMING_SNAKE_CASE__: Tuple= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: int= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[Any]= '''lower newer'''
        SCREAMING_SNAKE_CASE__: Dict= self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE__: Dict= processor(text=lowerCAmelCase , images=lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(lowerCAmelCase ):
            processor()

    def UpperCamelCase_ ( self ) -> int:
        # Images + visual prompt yields both pixel tensors.
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: Optional[Any]= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.prepare_image_inputs()
        SCREAMING_SNAKE_CASE__: List[str]= processor(images=lowerCAmelCase , visual_prompt=lowerCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(lowerCAmelCase ):
            processor()

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # batch_decode must delegate straight to the tokenizer.
        SCREAMING_SNAKE_CASE__: List[str]= self.get_image_processor()
        SCREAMING_SNAKE_CASE__: Tuple= self.get_tokenizer()
        SCREAMING_SNAKE_CASE__: Optional[Any]= CLIPSegProcessor(tokenizer=lowerCAmelCase , image_processor=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[Any]= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        SCREAMING_SNAKE_CASE__: Any= processor.batch_decode(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= tokenizer.batch_decode(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
| 64 | # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    # NOTE(review): identifiers look machine-mangled -- locals are bound via
    # `SCREAMING_SNAKE_CASE__` placeholders while later lines read the real
    # names (`unet`, `controlnet`, ...). Code left byte-identical; docs added.
    """Fast tests for the single-ControlNet Stable Diffusion img2img pipeline."""

    __a = StableDiffusionControlNetImgaImgPipeline
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def UpperCamelCase_ ( self ) -> str:
        # Tiny versions of every sub-model; each gets its own manual_seed(0)
        # so individual components are reproducible in isolation.
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: str= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: str= DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[str]= AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[Any]= CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTextModel(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]:
        # NOTE(review): duplicate `lowerCAmelCase` parameters (SyntaxError);
        # presumably (device, seed). Builds prompt + input/control images.
        if str(lowerCAmelCase ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.manual_seed(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= 2
        SCREAMING_SNAKE_CASE__: Tuple= randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , )
        SCREAMING_SNAKE_CASE__: int= floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__: str= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        SCREAMING_SNAKE_CASE__: Tuple= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def UpperCamelCase_ ( self ) -> Tuple:
        # Attention slicing must closely match the regular forward pass.
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase_ ( self ) -> Dict:
        # xformers attention must closely match default attention.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )

    def UpperCamelCase_ ( self ) -> str:
        # Batch of one must match a single-sample call.
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    # NOTE(review): identifiers look machine-mangled (placeholder bindings,
    # `controlneta` for two distinct controlnets, duplicate `lowerCAmelCase`
    # parameters). Code left byte-identical; only documentation added.
    """Fast tests for the multi-ControlNet Stable Diffusion img2img pipeline."""

    __a = StableDiffusionControlNetImgaImgPipeline
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def UpperCamelCase_ ( self ) -> Dict:
        # Tiny sub-models; two controlnets are wrapped in MultiControlNetModel.
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )

        def init_weights(lowerCAmelCase ):
            # Re-initialize the zero-initialized controlnet projections so the
            # two controlnets actually influence the output in these tests.
            # NOTE(review): reads `m`, and `torch.nn.Convad`/`init.normal` look
            # mangled (presumably Conv2d / normal_) -- confirm upstream.
            if isinstance(lowerCAmelCase , torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )

        SCREAMING_SNAKE_CASE__: Any= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Optional[int]= CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE__: Any= CLIPTextModel(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        SCREAMING_SNAKE_CASE__: Dict= MultiControlNetModel([controlneta, controlneta] )
        SCREAMING_SNAKE_CASE__: int= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]:
        # Same as the single-controlnet inputs, but with one control image per
        # controlnet. NOTE(review): duplicate `lowerCAmelCase` parameters.
        if str(lowerCAmelCase ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE__: str= torch.manual_seed(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= 2
        SCREAMING_SNAKE_CASE__: Tuple= [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
        ]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        SCREAMING_SNAKE_CASE__: int= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def UpperCamelCase_ ( self ) -> List[Any]:
        # Varying control_guidance_start/end (scalar and per-controlnet lists)
        # must produce pairwise-different outputs.
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: str= self.pipeline_class(**lowerCAmelCase )
        pipe.to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= 10.0
        SCREAMING_SNAKE_CASE__: Any= 4
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: List[Any]= pipe(**lowerCAmelCase )[0]
        SCREAMING_SNAKE_CASE__: Tuple= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: int= pipe(**lowerCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: str= pipe(**lowerCAmelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: Any= pipe(**lowerCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3

    def UpperCamelCase_ ( self ) -> int:
        # Attention slicing must closely match the regular forward pass.
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase_ ( self ) -> Dict:
        # xformers attention must closely match default attention.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # Batch of one must match a single-sample call.
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # save_pretrained is expected to be unimplemented for Multi-ControlNet;
        # the test tolerates (and effectively expects) NotImplementedError.
        SCREAMING_SNAKE_CASE__: Any= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pipeline_class(**lowerCAmelCase )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(lowerCAmelCase )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    # NOTE(review): identifiers look machine-mangled; the first method calls
    # super().tearDown(), so it is presumably the (renamed) tearDown hook --
    # as written it would not be invoked by unittest. Code left byte-identical.
    """Slow (CUDA) integration test for ControlNet-guided SD img2img."""

    def UpperCamelCase_ ( self ) -> Dict:
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_ ( self ) -> Tuple:
        # Full canny-ControlNet img2img run compared against a reference image.
        SCREAMING_SNAKE_CASE__: Optional[int]= ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
        SCREAMING_SNAKE_CASE__: Tuple= StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase , controlnet=lowerCAmelCase )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        # CPU generator keeps latents reproducible across GPU models.
        SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[Any]= '''evil space-punk bird'''
        SCREAMING_SNAKE_CASE__: List[str]= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
        SCREAMING_SNAKE_CASE__: List[Any]= load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
        SCREAMING_SNAKE_CASE__: Optional[Any]= pipe(
            lowerCAmelCase , lowerCAmelCase , control_image=lowerCAmelCase , generator=lowerCAmelCase , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= output.images[0]
        assert image.shape == (512, 512, 3)
        SCREAMING_SNAKE_CASE__: str= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
        assert np.abs(expected_image - image ).max() < 9e-2
| 64 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def A__(job):
    """Summarize one GitHub Actions jobs-API entry as start/end/duration.

    Fix in this revision: the mangled original bound every value to the same
    placeholder and then read undefined names (``start``, ``end``,
    ``duration_in_min``) and never wrote the result keys; the straightforward
    bindings are restored.

    Args:
        job: one element of the jobs-API ``jobs`` list; must carry ISO-8601
             ``started_at`` and ``completed_at`` timestamps.

    Returns:
        dict with keys ``started_at``, ``completed_at`` and ``duration``
        (whole minutes, rounded).
    """
    job_info = {}

    start = job['''started_at''']
    end = job['''completed_at''']

    # `date_parser` is dateutil.parser, imported at module level.
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
def A__ ( workflow_run_id , token=None ):
    """Fetch per-job timing info for one GitHub Actions workflow run.

    Args:
        workflow_run_id: numeric id of the workflow run to inspect.
        token: optional GitHub token; when given it is sent as a Bearer
            Authorization header (the original signature declared both
            parameters with the same name, a SyntaxError).

    Returns:
        dict mapping job name -> timing dict (``started_at`` /
        ``completed_at`` / ``duration``), or ``{}`` on any error.
    """
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'Bearer {token}'}
    url = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        # NOTE(review): `extract_time_from_single_job` refers to the helper defined
        # above (renamed `A__` by obfuscation) — confirm the intended module name.
        job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        # First page already fetched 100 jobs; page through the remainder.
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'&page={i + 2}' , headers=headers ).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        return job_time
    except Exception:
        print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
        return {}
if __name__ == "__main__":
    # CLI entry point: print each job's duration for one workflow run,
    # longest job first.
    # NOTE(review): obfuscation rewrote the assignment targets to `lowercase_`,
    # while later lines read `parser`, `args` and `job_time` — the bindings
    # no longer match; confirm against the original script.
    lowercase_ : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    lowercase_ : Any = parser.parse_args()
    lowercase_ : Union[str, Any] = get_job_time(args.workflow_run_id)
    # Sort jobs by duration, descending, before printing.
    lowercase_ : List[str] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f'''{k}: {v["duration"]}''')
| 64 | # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowerCamelCase :
    """Functional scheduler state (a flax struct, so it is a JAX pytree).

    NOTE(review): the field declarations were mangled to ``__a = 42``
    placeholders by obfuscation; the ``create`` classmethod below shows the
    intended fields are ``common``, ``init_noise_sigma`` and ``timesteps`` —
    confirm against the original FlaxDDPMScheduler state definition.
    """
    __a = 42
    # setable values
    __a = 42
    __a = 42
    __a = None
    @classmethod
    def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
        # Alternate constructor: bundle the shared scheduler tensors, the
        # initial noise sigma and the timestep schedule into one state object.
        return cls(common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase )
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
    """Scheduler step output.

    NOTE(review): ``42`` is an obfuscation placeholder for the single field
    declaration (the state carried alongside ``prev_sample`` in the flax
    scheduler output) — confirm against ``FlaxSchedulerOutput``.
    """
    __a = 42
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ):
    """Flax DDPM-style denoising scheduler (ancestral sampling, formulas from
    https://arxiv.org/pdf/2006.11239.pdf).

    NOTE(review): obfuscation collapsed parameter names to ``lowerCAmelCase``
    and assignment targets to ``SCREAMING_SNAKE_CASE__``, so several lines
    read names (``common``, ``state``, ``t``, ``sample`` ...) that are no
    longer bound under those names here. The code is left byte-identical;
    comments describe the intended behaviour — confirm against the original
    ``FlaxDDPMScheduler``.
    """
    # Names of compatible Karras-style schedulers.
    __a = [e.name for e in FlaxKarrasDiffusionSchedulers]
    # Obfuscation placeholder for the (typed) dtype attribute declaration.
    __a = 42
    @property
    def UpperCamelCase_ ( self ) -> List[Any]:
        # has_state: this scheduler carries explicit functional state.
        return True
    @register_to_config
    def __init__( self , lowerCAmelCase = 1000 , lowerCAmelCase = 0.0001 , lowerCAmelCase = 0.02 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "fixed_small" , lowerCAmelCase = True , lowerCAmelCase = "epsilon" , lowerCAmelCase = jnp.floataa , ) -> Optional[int]:
        # Defaults (in order): num_train_timesteps, beta_start, beta_end,
        # beta_schedule, trained_betas, variance_type, clip_sample,
        # prediction_type, dtype.
        # NOTE(review): `dtype` is not bound under that name here (the last
        # parameter is `lowerCAmelCase`) — obfuscation artifact, confirm.
        SCREAMING_SNAKE_CASE__: Optional[int]= dtype
    def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> DDPMSchedulerState:
        # create_state: build the initial functional scheduler state.
        if common is None:
            SCREAMING_SNAKE_CASE__: Optional[Any]= CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        SCREAMING_SNAKE_CASE__: Dict= jnp.array(1.0 , dtype=self.dtype )
        # Full training schedule, descending (T-1 ... 0).
        SCREAMING_SNAKE_CASE__: int= jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase , )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None ) -> jnp.ndarray:
        # scale_model_input: DDPM needs no input scaling — identity.
        return sample
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = () ) -> DDPMSchedulerState:
        # set_timesteps: pick `num_inference_steps` evenly spaced timesteps.
        SCREAMING_SNAKE_CASE__: str= self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        SCREAMING_SNAKE_CASE__: str= (jnp.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase , )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[str]:
        # _get_variance: posterior variance for one timestep t, per the
        # configured (or overridden) variance_type.
        SCREAMING_SNAKE_CASE__: Tuple= state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE__: int= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        SCREAMING_SNAKE_CASE__: int= (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            SCREAMING_SNAKE_CASE__: Dict= jnp.clip(lowerCAmelCase , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            SCREAMING_SNAKE_CASE__: str= jnp.log(jnp.clip(lowerCAmelCase , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            SCREAMING_SNAKE_CASE__: Union[str, Any]= state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            SCREAMING_SNAKE_CASE__: Optional[Any]= jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            # Model predicts the variance directly.
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate in log space between the min and max variance.
            SCREAMING_SNAKE_CASE__: List[Any]= variance
            SCREAMING_SNAKE_CASE__: Any= state.common.betas[t]
            SCREAMING_SNAKE_CASE__: List[Any]= (predicted_variance + 1) / 2
            SCREAMING_SNAKE_CASE__: Optional[Any]= frac * max_log + (1 - frac) * min_log
        return variance
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        # step: one reverse-diffusion step x_t -> x_{t-1}.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= timestep
        if key is None:
            # Deterministic fallback PRNG key when the caller supplies none.
            SCREAMING_SNAKE_CASE__: Optional[Any]= jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            # Model emits mean and variance stacked on the channel axis.
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= jnp.split(lowerCAmelCase , sample.shape[1] , axis=1 )
        else:
            SCREAMING_SNAKE_CASE__: Any= None
        # 1. compute alphas, betas
        SCREAMING_SNAKE_CASE__: List[Any]= state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE__: Optional[int]= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        SCREAMING_SNAKE_CASE__: Optional[int]= 1 - alpha_prod_t
        SCREAMING_SNAKE_CASE__: str= 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            SCREAMING_SNAKE_CASE__: Dict= (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            SCREAMING_SNAKE_CASE__: str= model_output
        elif self.config.prediction_type == "v_prediction":
            SCREAMING_SNAKE_CASE__: Tuple= (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            SCREAMING_SNAKE_CASE__: Any= jnp.clip(lowerCAmelCase , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE__: int= (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        SCREAMING_SNAKE_CASE__: Any= state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE__: Dict= pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            # Sample sigma_t * z with a fresh PRNG split.
            SCREAMING_SNAKE_CASE__: int= jax.random.split(lowerCAmelCase , num=1 )
            SCREAMING_SNAKE_CASE__: str= jax.random.normal(lowerCAmelCase , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(lowerCAmelCase , lowerCAmelCase , predicted_variance=lowerCAmelCase ) ** 0.5) * noise
        # No noise is added at the final step (t == 0).
        SCREAMING_SNAKE_CASE__: Union[str, Any]= jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        SCREAMING_SNAKE_CASE__: Optional[int]= pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase , state=lowerCAmelCase )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
        # add_noise: delegate to the shared helper.
        return add_noise_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
        # get_velocity: delegate to the shared helper.
        return get_velocity_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
    def __len__( self ) -> Tuple:
        # Length of the full training schedule.
        return self.config.num_train_timesteps
| 64 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowercase_ : Dict = random.Random()
if is_torch_available():
import torch
def A__ ( shape , scale=1.0 , rng=None , name=None ):
    """Build a nested list of random floats with layout ``(shape[0], shape[1])``.

    Args:
        shape: 2-tuple ``(batch, length)``; only the first two dims are used.
        scale: multiplier applied to each uniform sample (values in [0, scale)).
        rng: optional ``random.Random``; falls back to the module-level
            ``global_rng`` when omitted.
        name: unused; kept for signature compatibility.

    The original signature declared all four parameters with the same name
    (``snake_case_``), a SyntaxError, while the body read ``shape``/``scale``/
    ``rng`` — restored here.
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class _lowerCamelCase ( unittest.TestCase ):
    """Helper that stores feature-extractor hyperparameters and builds dummy speech batches."""
    def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=1 , lowerCAmelCase=0.0 , lowerCAmelCase=16000 , lowerCAmelCase=True , lowerCAmelCase=True , ) -> List[str]:
        # NOTE(review): parameter names were mangled to `lowerCAmelCase` by
        # obfuscation; the right-hand sides below show the intended order
        # (parent, batch_size, min/max_seq_length, ...) — confirm.
        SCREAMING_SNAKE_CASE__: Optional[Any]= parent
        SCREAMING_SNAKE_CASE__: Dict= batch_size
        SCREAMING_SNAKE_CASE__: Optional[int]= min_seq_length
        SCREAMING_SNAKE_CASE__: Dict= max_seq_length
        # Length step so a batch spans min_seq_length .. max_seq_length.
        SCREAMING_SNAKE_CASE__: Optional[Any]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        SCREAMING_SNAKE_CASE__: Dict= feature_size
        SCREAMING_SNAKE_CASE__: str= padding_value
        SCREAMING_SNAKE_CASE__: Dict= sampling_rate
        SCREAMING_SNAKE_CASE__: List[str]= return_attention_mask
        SCREAMING_SNAKE_CASE__: str= do_normalize
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Kwargs used to construct an ASTFeatureExtractor under test.
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Dict:
        # Build a batch of dummy waveforms (equal_length -> all max length;
        # numpify -> numpy arrays instead of lists).
        # NOTE(review): this method shares the obfuscated name with the one
        # above, so it shadows it at class-definition time — confirm.
        def _flatten(lowerCAmelCase ):
            return list(itertools.chain(*lowerCAmelCase ) )
        if equal_length:
            SCREAMING_SNAKE_CASE__: int= floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            SCREAMING_SNAKE_CASE__: int= [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """Tests for ASTFeatureExtractor (call semantics, dtypes, integration values).

    NOTE(review): obfuscation left many references (`feat_extract`,
    `speech_inputs`, `encoded_sequences_*`, ...) without matching bindings;
    code is left byte-identical, comments describe intent — confirm against
    the original test file.
    """
    # Class under test for the shared SequenceFeatureExtractionTestMixin.
    __a = ASTFeatureExtractor
    def UpperCamelCase_ ( self ) -> int:
        # setUp: build the hyperparameter helper.
        SCREAMING_SNAKE_CASE__: List[Any]= ASTFeatureExtractionTester(self )
    def UpperCamelCase_ ( self ) -> Any:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        SCREAMING_SNAKE_CASE__: Optional[int]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        SCREAMING_SNAKE_CASE__: Dict= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test batched
        SCREAMING_SNAKE_CASE__: Tuple= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
        SCREAMING_SNAKE_CASE__: List[Any]= np.asarray(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
    @require_torch
    def UpperCamelCase_ ( self ) -> Dict:
        # Padding should return float32 in both numpy and torch tensors.
        import torch
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        SCREAMING_SNAKE_CASE__: List[str]= np.random.rand(100 ).astype(np.floataa )
        SCREAMING_SNAKE_CASE__: Optional[Any]= np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
        # _load_datasamples: fetch `num_samples` decoded librispeech clips.
        from datasets import load_dataset
        SCREAMING_SNAKE_CASE__: Optional[int]= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        SCREAMING_SNAKE_CASE__: Dict= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    @require_torch
    def UpperCamelCase_ ( self ) -> str:
        # Integration test against precomputed reference log-mel values.
        # fmt: off
        SCREAMING_SNAKE_CASE__: str= torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
            -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
            -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
            -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 )
        SCREAMING_SNAKE_CASE__: Tuple= ASTFeatureExtractor()
        SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
def A__ ( upper_limit : int ):
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Args:
        upper_limit: highest index to compute (must be >= 0).

    Returns:
        list of length ``upper_limit + 1`` with C(0), C(1), ..., C(upper_limit).

    Raises:
        ValueError: if ``upper_limit`` is negative.

    Fixes vs. the obfuscated original: the parameter was named ``snake_case_``
    while the body read ``upper_limit``, and the inner loop ranged over the
    parameter instead of ``i``.
    """
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
    # Interactive loop: read an upper limit, print C(0)..C(N); a negative
    # number (or invalid input) exits.
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            # NOTE(review): the input is assigned to `lowercase_` (obfuscation)
            # but read back as `N`, and `catalan_numbers` is not defined under
            # that name in this file (the function above is `A__`) — confirm.
            lowercase_ : Any = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(f'''The Catalan numbers from 0 through {N} are:''')
                print(catalan_numbers(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')
    # Run the module's doctests after the interactive session ends.
    import doctest
    doctest.testmod()
| 64 | 1 |
def A__ ( snake_case_ : int ):
    """Return True iff the decimal digits of the number read the same backwards."""
    digits = str(snake_case_ )
    return digits == digits[::-1]
def A__ ( snake_case_ : int ):
    """Return the number plus the number formed by reversing its decimal digits."""
    reversed_digits = str(snake_case_ )[::-1]
    return int(snake_case_ ) + int(reversed_digits )
def A__ ( snake_case_ : int = 10_000 ):
    """Project Euler 55: count Lychrel candidates below ``snake_case_``.

    A number is assumed Lychrel if 50 reverse-and-add iterations never
    produce a palindrome.

    Fixes vs. the obfuscated original: the helpers were called under names
    that do not exist in this file (both were renamed ``A__``), and the limit
    was passed to them instead of the running value. The helpers are inlined
    here so the function is self-contained.
    """
    def _is_palindrome(num: int) -> bool:
        # Palindrome test on the decimal digits.
        digits = str(num)
        return digits == digits[::-1]

    def _sum_reverse(num: int) -> int:
        # One reverse-and-add step.
        return num + int(str(num)[::-1])

    lychrel_nums = []
    for num in range(1 , snake_case_ ):
        iterations = 0
        current = num
        while iterations < 50:
            current = _sum_reverse(current )
            iterations += 1
            if _is_palindrome(current ):
                break
        else:
            # 50 iterations without reaching a palindrome -> Lychrel candidate.
            lychrel_nums.append(num )
    return len(lychrel_nums )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this file
    # (the function above was renamed `A__` by obfuscation) — confirm.
    print(f'''{solution() = }''')
| 64 | from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    # The UnCLIP pipelines need both torch and transformers >= 4.25.0.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy objects that raise an informative error when used.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Dependencies satisfied: expose the real pipelines.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 64 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ : Tuple = 3
def A__ ( snake_case_ : int ):
    """Pick a random g in [3, p) that passes two quick non-primitivity filters.

    Args:
        snake_case_: the prime modulus p.

    NOTE(review): the two checks (g^2 mod p != 1 and g^p mod p != 1) reject
    only obvious cases and do not prove g is a primitive root mod p — confirm
    against the algorithm's original intent.

    Fixes vs. the obfuscated original: the candidate was assigned to a
    discarded name, both filters tested the modulus against itself, and the
    final ``return g`` raised NameError.
    """
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , snake_case_ )
        # g^2 == 1 (mod p) only for g == p-1 here; reject it.
        if pow(g , 2 , snake_case_ ) == 1:
            continue
        if pow(g , snake_case_ , snake_case_ ) == 1:
            continue
        return g
def A__ ( snake_case_ : int ):
    """Generate an ElGamal key pair of ``snake_case_`` (key_size) bits.

    NOTE(review): obfuscation discarded the intermediate bindings — in order
    they were the large prime p, a primitive root, the private exponent, and
    its modular inverse-derived public part. The final tuples read ``key_size``
    / ``e_a`` / ``d`` / ``p`` and the return reads ``public_key`` /
    ``private_key``, none of which are bound under those names here —
    confirm against the original source before use.
    """
    print('''Generating prime p...''' )
    SCREAMING_SNAKE_CASE__: List[Any]= rabin_miller.generate_large_prime(snake_case_ ) # select large prime number.
    SCREAMING_SNAKE_CASE__: int= primitive_root(snake_case_ ) # one primitive root on modulo p.
    SCREAMING_SNAKE_CASE__: int= random.randrange(3 , snake_case_ ) # private_key -> have to be greater than 2 for safety.
    SCREAMING_SNAKE_CASE__: str= cryptomath.find_mod_inverse(pow(snake_case_ , snake_case_ , snake_case_ ) , snake_case_ )
    SCREAMING_SNAKE_CASE__: int= (key_size, e_a, e_a, p)
    SCREAMING_SNAKE_CASE__: Union[str, Any]= (key_size, d)
    return public_key, private_key
def A__ ( name , key_size ):
    """Generate an ElGamal key pair and write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``.

    Args:
        name: file-name prefix for the two key files.
        key_size: key size in bits, forwarded to the key generator.

    Exits the program (``sys.exit``) if either file already exists, to avoid
    silently overwriting keys.

    Fixes vs. the obfuscated original: both parameters were declared with the
    same name (a SyntaxError) and the generated key pair was assigned to a
    discarded local.
    """
    if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
        print('''\nWARNING:''' )
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(F'\nWriting public key to file {name}_pubkey.txt...' )
    with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
        fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
    print(F'Writing private key to file {name}_privkey.txt...' )
    with open(F'{name}_privkey.txt' , '''w''' ) as fo:
        fo.write(F'{private_key[0]},{private_key[1]}' )
def A__ ( ):
    """Entry point: generate 2048-bit ElGamal key files with prefix 'elgamal'.

    NOTE(review): `make_key_files` is not defined under that name in this file
    (the function above was renamed `A__` by obfuscation) — confirm.
    """
    print('''Making key files...''' )
    make_key_files('''elgamal''' , 2_048 )
    print('''Key files generation successful''' )
if __name__ == "__main__":
    # NOTE(review): `main` is likewise not bound under that name here — confirm.
    main()
| 64 | import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def A__ ( snake_case_ : List[Any] ):
SCREAMING_SNAKE_CASE__: str= torch.load(snake_case_ , map_location='''cpu''' )
if "model" in sd.keys():
SCREAMING_SNAKE_CASE__: Any= torch.load(snake_case_ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
SCREAMING_SNAKE_CASE__: List[str]= [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
SCREAMING_SNAKE_CASE__: str= {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE__: Union[str, Any]= sd.pop(snake_case_ )
SCREAMING_SNAKE_CASE__: int= list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE__: int= sd[key]
# We split QKV in separate Q,K,V
SCREAMING_SNAKE_CASE__: Optional[Any]= key.replace('''.qkv_proj.''' , '''.q_proj.''' )
SCREAMING_SNAKE_CASE__: Optional[int]= key.replace('''.qkv_proj.''' , '''.k_proj.''' )
SCREAMING_SNAKE_CASE__: List[str]= key.replace('''.qkv_proj.''' , '''.v_proj.''' )
SCREAMING_SNAKE_CASE__: Optional[int]= value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= torch.split(snake_case_ , depth // 3 , dim=0 )
SCREAMING_SNAKE_CASE__: List[Any]= q
SCREAMING_SNAKE_CASE__: Any= k
SCREAMING_SNAKE_CASE__: Optional[Any]= v
del sd[key]
return sd
@torch.no_grad()
def A__ ( checkpoint_path , pytorch_dump_folder_path , config=None ):
    """Convert a metaseq OPT checkpoint into a Hugging Face ``OPTModel`` and save it.

    Args:
        checkpoint_path: path to the fairseq/metaseq checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        config: optional path/name of an ``OPTConfig``; defaults to ``OPTConfig()``.

    Fixes vs. the obfuscated original: all three parameters were declared
    with the same name (a SyntaxError) and every intermediate binding was
    discarded.

    NOTE(review): ``load_checkpoint`` refers to the loader defined above
    (renamed ``A__`` by obfuscation) — confirm the intended module layout.
    """
    state_dict = load_checkpoint(checkpoint_path )
    if config is not None:
        config = OPTConfig.from_pretrained(config )
    else:
        config = OPTConfig()
    # Weights are stored in fp16; evaluate-mode half-precision model.
    model = OPTModel(config ).half().eval()
    model.load_state_dict(state_dict )
    # Check results
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI wrapper around the conversion routine.
    # NOTE(review): the parser/args are assigned to `lowercase_` (obfuscation)
    # but read back as `parser`/`args`, and `convert_opt_checkpoint` is not
    # defined under that name in this file — confirm.
    lowercase_ : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    lowercase_ : int = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 64 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _lowerCamelCase ( UpperCamelCase_ ):
    """Config smoke test: a Levit config must expose its size/head attributes."""
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # NOTE(review): `lowerCAmelCase` is not bound in this scope — the
        # constructed config was meant to be checked here (obfuscation
        # artifact); confirm against the original test.
        SCREAMING_SNAKE_CASE__: str= self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(lowerCAmelCase , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(lowerCAmelCase , '''num_attention_heads''' ) )
class _lowerCamelCase :
    """Builds Levit configs/inputs and runs shape checks for the model tests.

    NOTE(review): obfuscation mangled parameter names and assignment targets;
    code is left byte-identical and comments describe the intended behaviour —
    confirm against the original LevitModelTester.
    """
    def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=64 , lowerCAmelCase=3 , lowerCAmelCase=3 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=16 , lowerCAmelCase=[128, 256, 384] , lowerCAmelCase=[4, 6, 8] , lowerCAmelCase=[2, 3, 4] , lowerCAmelCase=[16, 16, 16] , lowerCAmelCase=0 , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=0.02 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ) -> Optional[Any]:
        # Intended parameter order is shown by the right-hand sides below
        # (parent, batch_size, image_size, num_channels, ...).
        SCREAMING_SNAKE_CASE__: Union[str, Any]= parent
        SCREAMING_SNAKE_CASE__: str= batch_size
        SCREAMING_SNAKE_CASE__: int= image_size
        SCREAMING_SNAKE_CASE__: Any= num_channels
        SCREAMING_SNAKE_CASE__: Tuple= kernel_size
        SCREAMING_SNAKE_CASE__: List[str]= stride
        SCREAMING_SNAKE_CASE__: Tuple= padding
        SCREAMING_SNAKE_CASE__: str= hidden_sizes
        SCREAMING_SNAKE_CASE__: List[Any]= num_attention_heads
        SCREAMING_SNAKE_CASE__: Any= depths
        SCREAMING_SNAKE_CASE__: Tuple= key_dim
        SCREAMING_SNAKE_CASE__: Union[str, Any]= drop_path_rate
        SCREAMING_SNAKE_CASE__: Optional[Any]= patch_size
        SCREAMING_SNAKE_CASE__: List[str]= attention_ratio
        SCREAMING_SNAKE_CASE__: List[str]= mlp_ratio
        SCREAMING_SNAKE_CASE__: Dict= initializer_range
        # Downsampling operations between the three Levit stages.
        SCREAMING_SNAKE_CASE__: str= [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        SCREAMING_SNAKE_CASE__: str= is_training
        SCREAMING_SNAKE_CASE__: Optional[Any]= use_labels
        SCREAMING_SNAKE_CASE__: str= num_labels
        SCREAMING_SNAKE_CASE__: Optional[int]= initializer_range
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # prepare_config_and_inputs: random pixel batch plus optional labels.
        SCREAMING_SNAKE_CASE__: int= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE__: Optional[int]= None
        if self.use_labels:
            SCREAMING_SNAKE_CASE__: int= ids_tensor([self.batch_size] , self.num_labels )
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_config()
        return config, pixel_values, labels
    def UpperCamelCase_ ( self ) -> Optional[int]:
        # get_config: materialise a LevitConfig from the stored hyperparameters.
        return LevitConfig(
            image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> str:
        # create_and_check_model: forward pass and last-hidden-state shape check.
        SCREAMING_SNAKE_CASE__: List[str]= LevitModel(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: int= model(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= (self.image_size, self.image_size)
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= image_size[0], image_size[1]
        # Four stride-`stride` convs shrink the spatial dims before the stages.
        for _ in range(4 ):
            SCREAMING_SNAKE_CASE__: int= floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            SCREAMING_SNAKE_CASE__: Optional[int]= floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
        # create_and_check_for_image_classification: logits shape check.
        SCREAMING_SNAKE_CASE__: str= self.num_labels
        SCREAMING_SNAKE_CASE__: int= LevitForImageClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: List[str]= model(lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # prepare_config_and_inputs_for_common: repackage inputs as a kwargs dict.
        SCREAMING_SNAKE_CASE__: List[str]= self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= config_and_inputs
        SCREAMING_SNAKE_CASE__: List[Any]= {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__a = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
__a = False
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: int= LevitModelTester(self )
SCREAMING_SNAKE_CASE__: Any= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def UpperCamelCase_ ( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self ) -> Optional[Any]:
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def UpperCamelCase_ ( self ) -> Tuple:
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def UpperCamelCase_ ( self ) -> int:
pass
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__: Any= model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__: Union[str, Any]= [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__: Union[str, Any]= ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Optional[int]= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__: Tuple= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__: Optional[int]= outputs.hidden_states
SCREAMING_SNAKE_CASE__: List[str]= len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= (self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: int= image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE__: str= floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE__: Dict= floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__: str= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__: List[str]= True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase_ ( self ) -> Any:
pass
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Tuple= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__: int= True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE__: Optional[int]= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__: Optional[int]= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= model(**lowerCAmelCase ).loss
loss.backward()
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__: int= False
SCREAMING_SNAKE_CASE__: Dict= True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE__: List[str]= model_class(lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__: Optional[int]= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= model(**lowerCAmelCase ).loss
loss.backward()
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__: Optional[Any]= [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ):
SCREAMING_SNAKE_CASE__: str= problem_type['''title''']
SCREAMING_SNAKE_CASE__: Optional[int]= problem_type['''num_labels''']
SCREAMING_SNAKE_CASE__: Optional[int]= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__: Union[str, Any]= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE__: str= inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
SCREAMING_SNAKE_CASE__: Optional[Any]= inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCAmelCase ) as warning_list:
SCREAMING_SNAKE_CASE__: Any= model(**lowerCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def UpperCamelCase_ ( self ) -> List[str]:
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__: str= LevitModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def A__ ( ):
SCREAMING_SNAKE_CASE__: Tuple= Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self ) -> Dict:
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def UpperCamelCase_ ( self ) -> Any:
SCREAMING_SNAKE_CASE__: str= LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= self.default_image_processor
SCREAMING_SNAKE_CASE__: Any= prepare_img()
SCREAMING_SNAKE_CASE__: Optional[Any]= image_processor(images=lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__: List[str]= model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE__: List[str]= torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 64 | def A__ ( snake_case_ : float , snake_case_ : float ):
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 1 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def A__ ( snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Any ):
SCREAMING_SNAKE_CASE__: Optional[int]= multiprocessing.Manager()
SCREAMING_SNAKE_CASE__: List[str]= manager.list()
SCREAMING_SNAKE_CASE__: List[str]= multiprocessing.Process(target=snake_case_ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def A__ ( snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
SCREAMING_SNAKE_CASE__: List[str]= shutil.rmtree
SCREAMING_SNAKE_CASE__: Union[str, Any]= os.rmdir
SCREAMING_SNAKE_CASE__: List[Any]= os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
SCREAMING_SNAKE_CASE__: Tuple= {}
with swallow_io():
with time_limit(snake_case_ ):
exec(snake_case_ , snake_case_ )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(F'failed: {e}' )
# Needed for cleaning up.
SCREAMING_SNAKE_CASE__: int= rmtree
SCREAMING_SNAKE_CASE__: List[Any]= rmdir
SCREAMING_SNAKE_CASE__: Any= chdir
@contextlib.contextmanager
def A__ ( snake_case_ : int ):
def signal_handler(snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , snake_case_ )
signal.signal(signal.SIGALRM , snake_case_ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def A__ ( ):
SCREAMING_SNAKE_CASE__: List[str]= WriteOnlyStringIO()
with contextlib.redirect_stdout(snake_case_ ):
with contextlib.redirect_stderr(snake_case_ ):
with redirect_stdin(snake_case_ ):
yield
@contextlib.contextmanager
def A__ ( ):
with tempfile.TemporaryDirectory() as dirname:
with chdir(snake_case_ ):
yield dirname
class _lowerCamelCase ( UpperCamelCase_ ):
pass
class _lowerCamelCase ( io.StringIO ):
def UpperCamelCase_ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> str:
raise OSError
def UpperCamelCase_ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> Optional[int]:
raise OSError
def UpperCamelCase_ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> str:
raise OSError
def UpperCamelCase_ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> Optional[int]:
return False
class _lowerCamelCase ( contextlib._RedirectStream ): # type: ignore
__a = "stdin"
@contextlib.contextmanager
def A__ ( snake_case_ : Dict ):
if root == ".":
yield
return
SCREAMING_SNAKE_CASE__: Tuple= os.getcwd()
os.chdir(snake_case_ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(snake_case_ )
def A__ ( snake_case_ : Union[str, Any]=None ):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
SCREAMING_SNAKE_CASE__: List[str]= None
SCREAMING_SNAKE_CASE__: Tuple= None
import os
SCREAMING_SNAKE_CASE__: Any= '''1'''
SCREAMING_SNAKE_CASE__: List[Any]= None
SCREAMING_SNAKE_CASE__: Any= None
SCREAMING_SNAKE_CASE__: Tuple= None
SCREAMING_SNAKE_CASE__: Optional[Any]= None
SCREAMING_SNAKE_CASE__: str= None
SCREAMING_SNAKE_CASE__: str= None
SCREAMING_SNAKE_CASE__: Optional[Any]= None
SCREAMING_SNAKE_CASE__: str= None
SCREAMING_SNAKE_CASE__: int= None
SCREAMING_SNAKE_CASE__: str= None
SCREAMING_SNAKE_CASE__: Any= None
SCREAMING_SNAKE_CASE__: str= None
SCREAMING_SNAKE_CASE__: int= None
SCREAMING_SNAKE_CASE__: Tuple= None
SCREAMING_SNAKE_CASE__: Optional[int]= None
SCREAMING_SNAKE_CASE__: List[Any]= None
SCREAMING_SNAKE_CASE__: List[Any]= None
SCREAMING_SNAKE_CASE__: Optional[int]= None
SCREAMING_SNAKE_CASE__: Any= None
SCREAMING_SNAKE_CASE__: Any= None
SCREAMING_SNAKE_CASE__: List[Any]= None
SCREAMING_SNAKE_CASE__: List[str]= None
SCREAMING_SNAKE_CASE__: Union[str, Any]= None
SCREAMING_SNAKE_CASE__: Any= None
SCREAMING_SNAKE_CASE__: List[str]= None
SCREAMING_SNAKE_CASE__: Optional[int]= None
SCREAMING_SNAKE_CASE__: List[Any]= None
import shutil
SCREAMING_SNAKE_CASE__: List[str]= None
SCREAMING_SNAKE_CASE__: int= None
SCREAMING_SNAKE_CASE__: Tuple= None
import subprocess
SCREAMING_SNAKE_CASE__: int= None # type: ignore
SCREAMING_SNAKE_CASE__: Optional[int]= None
import sys
SCREAMING_SNAKE_CASE__: Optional[Any]= None
SCREAMING_SNAKE_CASE__: Any= None
SCREAMING_SNAKE_CASE__: int= None
SCREAMING_SNAKE_CASE__: Union[str, Any]= None
SCREAMING_SNAKE_CASE__: Union[str, Any]= None
| 64 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ : Any = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : int = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _lowerCamelCase ( UpperCamelCase_ ):
__a = "facebook/bart-large-mnli"
__a = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__a = "text_classifier"
__a = AutoTokenizer
__a = AutoModelForSequenceClassification
__a = ["text", ["text"]]
__a = ["text"]
def UpperCamelCase_ ( self ) -> Optional[Any]:
super().setup()
SCREAMING_SNAKE_CASE__: str= self.model.config
SCREAMING_SNAKE_CASE__: Optional[Any]= -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
SCREAMING_SNAKE_CASE__: Optional[Any]= int(lowerCAmelCase )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Dict= labels
return self.pre_processor(
[text] * len(lowerCAmelCase ) , [f'This example is {label}' for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__: str= outputs.logits
SCREAMING_SNAKE_CASE__: Optional[int]= torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 64 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_ )
class _lowerCamelCase ( UpperCamelCase_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__a = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
__a = Features({"text": Value("string" )} )
__a = Features({"labels": ClassLabel} )
__a = "text"
__a = "labels"
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , lowerCAmelCase ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
SCREAMING_SNAKE_CASE__: Union[str, Any]= copy.deepcopy(self )
SCREAMING_SNAKE_CASE__: Tuple= self.label_schema.copy()
SCREAMING_SNAKE_CASE__: Union[str, Any]= features[self.label_column]
SCREAMING_SNAKE_CASE__: List[str]= label_schema
return task_template
@property
def UpperCamelCase_ ( self ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 64 | 1 |
class _lowerCamelCase :
def __init__( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Tuple= ''''''
SCREAMING_SNAKE_CASE__: int= ''''''
SCREAMING_SNAKE_CASE__: Optional[int]= []
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int:
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
SCREAMING_SNAKE_CASE__: Optional[int]= self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
SCREAMING_SNAKE_CASE__: List[str]= self.__min_dist_top_down_dp(lowerCAmelCase , n - 1 )
SCREAMING_SNAKE_CASE__: str= self.__min_dist_top_down_dp(m - 1 , lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= self.__min_dist_top_down_dp(m - 1 , n - 1 )
SCREAMING_SNAKE_CASE__: Optional[int]= 1 + min(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return self.dp[m][n]
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__: Optional[int]= worda
SCREAMING_SNAKE_CASE__: Optional[int]= worda
SCREAMING_SNAKE_CASE__: Optional[int]= [[-1 for _ in range(len(lowerCAmelCase ) )] for _ in range(len(lowerCAmelCase ) )]
return self.__min_dist_top_down_dp(len(lowerCAmelCase ) - 1 , len(lowerCAmelCase ) - 1 )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int:
SCREAMING_SNAKE_CASE__: int= worda
SCREAMING_SNAKE_CASE__: Any= worda
SCREAMING_SNAKE_CASE__: List[str]= len(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= len(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
SCREAMING_SNAKE_CASE__: Optional[Any]= j
elif j == 0: # second string is empty
SCREAMING_SNAKE_CASE__: List[Any]= i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
SCREAMING_SNAKE_CASE__: Dict= self.dp[i - 1][j - 1]
else:
SCREAMING_SNAKE_CASE__: List[Any]= self.dp[i][j - 1]
SCREAMING_SNAKE_CASE__: int= self.dp[i - 1][j]
SCREAMING_SNAKE_CASE__: Tuple= self.dp[i - 1][j - 1]
SCREAMING_SNAKE_CASE__: str= 1 + min(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return self.dp[m][n]
if __name__ == "__main__":
lowercase_ : Any = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
lowercase_ : Optional[Any] = input('Enter the first string: ').strip()
lowercase_ : List[Any] = input('Enter the second string: ').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
| 64 | import inspect
import unittest
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Any:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCamelCase_ ( self ) -> List[str]:
import diffusers
from diffusers.dependency_versions_table import deps
SCREAMING_SNAKE_CASE__: Tuple= inspect.getmembers(lowerCAmelCase , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
SCREAMING_SNAKE_CASE__: Optional[int]= '''k-diffusion'''
elif backend == "invisible_watermark":
SCREAMING_SNAKE_CASE__: int= '''invisible-watermark'''
assert backend in deps, f'{backend} is not in the deps table!'
| 64 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: Tuple= get_activation('''swish''' )
self.assertIsInstance(lowerCAmelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Optional[Any]= get_activation('''silu''' )
self.assertIsInstance(lowerCAmelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__: Optional[int]= get_activation('''mish''' )
self.assertIsInstance(lowerCAmelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase_ ( self ) -> int:
SCREAMING_SNAKE_CASE__: Dict= get_activation('''gelu''' )
self.assertIsInstance(lowerCAmelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 64 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Any:
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=lowerCAmelCase , )
assert hasattr(self , '''env''' )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Tuple:
# configuration for running training on smdistributed Model Parallel
SCREAMING_SNAKE_CASE__: Optional[Any]= {
'''enabled''': True,
'''processes_per_host''': 8,
}
SCREAMING_SNAKE_CASE__: Dict= {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
SCREAMING_SNAKE_CASE__: Optional[Any]= {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
SCREAMING_SNAKE_CASE__: Dict= '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 500,
} , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase , py_version='''py36''' , )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
TrainingJobAnalytics(lowerCAmelCase ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
@parameterized.expand([(1,)] )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> int:
# create estimator
SCREAMING_SNAKE_CASE__: List[str]= self.create_estimator(lowerCAmelCase )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE__: Any= TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE__: Optional[int]= list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE__: List[Any]= (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCAmelCase )
| 64 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
__a = 42
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ):
@register_to_config
def __init__( self , lowerCAmelCase = 65536 , lowerCAmelCase = None , lowerCAmelCase = 2 , lowerCAmelCase = 2 , lowerCAmelCase = 0 , lowerCAmelCase = "fourier" , lowerCAmelCase = True , lowerCAmelCase = False , lowerCAmelCase = 0.0 , lowerCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCAmelCase = "UNetMidBlock1D" , lowerCAmelCase = None , lowerCAmelCase = (32, 32, 64) , lowerCAmelCase = None , lowerCAmelCase = 8 , lowerCAmelCase = 1 , lowerCAmelCase = False , ) -> str:
super().__init__()
SCREAMING_SNAKE_CASE__: Union[str, Any]= sample_size
# time
if time_embedding_type == "fourier":
SCREAMING_SNAKE_CASE__: List[Any]= GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCAmelCase , log=lowerCAmelCase , flip_sin_to_cos=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= 2 * block_out_channels[0]
elif time_embedding_type == "positional":
SCREAMING_SNAKE_CASE__: Union[str, Any]= Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCAmelCase , downscale_freq_shift=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= block_out_channels[0]
if use_timestep_embedding:
SCREAMING_SNAKE_CASE__: Optional[int]= block_out_channels[0] * 4
SCREAMING_SNAKE_CASE__: Tuple= TimestepEmbedding(
in_channels=lowerCAmelCase , time_embed_dim=lowerCAmelCase , act_fn=lowerCAmelCase , out_dim=block_out_channels[0] , )
SCREAMING_SNAKE_CASE__: Any= nn.ModuleList([] )
SCREAMING_SNAKE_CASE__: int= None
SCREAMING_SNAKE_CASE__: Optional[Any]= nn.ModuleList([] )
SCREAMING_SNAKE_CASE__: List[str]= None
# down
SCREAMING_SNAKE_CASE__: Optional[Any]= in_channels
for i, down_block_type in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Dict= output_channel
SCREAMING_SNAKE_CASE__: str= block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
SCREAMING_SNAKE_CASE__: Any= i == len(lowerCAmelCase ) - 1
SCREAMING_SNAKE_CASE__: Tuple= get_down_block(
lowerCAmelCase , num_layers=lowerCAmelCase , in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCAmelCase )
# mid
SCREAMING_SNAKE_CASE__: Optional[int]= get_mid_block(
lowerCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCAmelCase , add_downsample=lowerCAmelCase , )
# up
SCREAMING_SNAKE_CASE__: Any= list(reversed(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__: List[str]= reversed_block_out_channels[0]
if out_block_type is None:
SCREAMING_SNAKE_CASE__: str= out_channels
else:
SCREAMING_SNAKE_CASE__: str= block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Any= output_channel
SCREAMING_SNAKE_CASE__: Any= (
reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase ) - 1 else final_upsample_channels
)
SCREAMING_SNAKE_CASE__: Union[str, Any]= i == len(lowerCAmelCase ) - 1
SCREAMING_SNAKE_CASE__: List[str]= get_up_block(
lowerCAmelCase , num_layers=lowerCAmelCase , in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= output_channel
# out
SCREAMING_SNAKE_CASE__: Any= norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
SCREAMING_SNAKE_CASE__: int= get_out_block(
out_block_type=lowerCAmelCase , num_groups_out=lowerCAmelCase , embed_dim=block_out_channels[0] , out_channels=lowerCAmelCase , act_fn=lowerCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = True , ) -> Union[UNetaDOutput, Tuple]:
SCREAMING_SNAKE_CASE__: Dict= timestep
if not torch.is_tensor(lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Dict= torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__: Optional[int]= timesteps[None].to(sample.device )
SCREAMING_SNAKE_CASE__: Dict= self.time_proj(lowerCAmelCase )
if self.config.use_timestep_embedding:
SCREAMING_SNAKE_CASE__: Any= self.time_mlp(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE__: Optional[Any]= timestep_embed[..., None]
SCREAMING_SNAKE_CASE__: Dict= timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
SCREAMING_SNAKE_CASE__: Tuple= timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
SCREAMING_SNAKE_CASE__: Optional[Any]= ()
for downsample_block in self.down_blocks:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[Any]= downsample_block(hidden_states=lowerCAmelCase , temb=lowerCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
SCREAMING_SNAKE_CASE__: List[Any]= self.mid_block(lowerCAmelCase , lowerCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= down_block_res_samples[-1:]
SCREAMING_SNAKE_CASE__: Optional[Any]= down_block_res_samples[:-1]
SCREAMING_SNAKE_CASE__: int= upsample_block(lowerCAmelCase , res_hidden_states_tuple=lowerCAmelCase , temb=lowerCAmelCase )
# 5. post-process
if self.out_block:
SCREAMING_SNAKE_CASE__: Any= self.out_block(lowerCAmelCase , lowerCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCAmelCase )
| 64 | import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    """Nightly integration test for the legacy ONNX Stable Diffusion inpaint pipeline.

    The original defined three methods all named `UpperCamelCase_` while the
    pipeline call reads `self.gpu_provider` / `self.gpu_options`, and bound
    every local to a placeholder while reading `pipe`, `output`, `image`,
    `expected_image`; names are restored from those reads.
    """

    @property
    def gpu_provider( self ):
        # CUDA execution provider with a fixed 15GB arena policy.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options( self ):
        options = ort.SessionOptions()
        # NOTE(review): the original assigned a bare `False` with no attribute
        # target; restored as `enable_mem_pattern = False` — confirm upstream.
        options.enable_mem_pattern = False
        return options

    def test_inference_inpaint_legacy( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A red cat sitting on a park bench'''
        # Fixed RNG so the generated image is reproducible.
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        # Allow small numerical drift between runtimes.
        assert np.abs(expected_image - image ).max() < 1e-2
| 64 | 1 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class _lowerCamelCase ( UpperCamelCase_ ):
    """Processor bundling an MCTCT feature extractor with a tokenizer.

    NOTE(review): locals throughout this class are bound to the placeholder
    ``SCREAMING_SNAKE_CASE__`` but later read under their intended names
    (``audio``, ``text``, ``inputs``, ``encodings``, ``kwargs`` ...), and many
    arguments are the placeholder ``lowerCAmelCase`` — the class cannot run as
    written.  Comments below describe the evident intent; confirm against the
    upstream processing file before relying on them.
    """
    # Class attributes consumed by the processor-mixin machinery; note both are
    # assigned to the same placeholder name `__a`, so the first is shadowed.
    __a = "MCTCTFeatureExtractor"
    __a = "AutoTokenizer"
    def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
        # Expected arguments: (feature_extractor, tokenizer); the mixin stores both.
        super().__init__(lowerCAmelCase , lowerCAmelCase )
        # Default "current" processor is the feature extractor; the boolean flag
        # presumably tracks whether we are inside the target-context manager below.
        SCREAMING_SNAKE_CASE__: List[Any]= self.feature_extractor
        SCREAMING_SNAKE_CASE__: Optional[int]= False
    def __call__( self , *lowerCAmelCase , **lowerCAmelCase ) -> List[str]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*lowerCAmelCase , **lowerCAmelCase )
        # Accept the deprecated `raw_speech` keyword as an alias for `audio`.
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            SCREAMING_SNAKE_CASE__: str= kwargs.pop('''raw_speech''' )
        else:
            SCREAMING_SNAKE_CASE__: str= kwargs.pop('''audio''' , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= kwargs.pop('''sampling_rate''' , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.pop('''text''' , lowerCAmelCase )
        # Positional fallback: first positional argument is the audio.
        if len(lowerCAmelCase ) > 0:
            SCREAMING_SNAKE_CASE__: Dict= args[0]
            SCREAMING_SNAKE_CASE__: Any= args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            SCREAMING_SNAKE_CASE__: Dict= self.feature_extractor(lowerCAmelCase , *lowerCAmelCase , sampling_rate=lowerCAmelCase , **lowerCAmelCase )
        if text is not None:
            SCREAMING_SNAKE_CASE__: Tuple= self.tokenizer(lowerCAmelCase , **lowerCAmelCase )
        # Merge policy: audio-only -> inputs, text-only -> encodings, both ->
        # inputs with the token ids attached (presumably as labels).
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            SCREAMING_SNAKE_CASE__: List[str]= encodings['''input_ids''']
            return inputs
    def UpperCamelCase_ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> Union[str, Any]:
        # Thin delegate to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
    def UpperCamelCase_ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> Union[str, Any]:
        # Pad input features and/or labels, mirroring __call__'s merge logic.
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*lowerCAmelCase , **lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= kwargs.pop('''input_features''' , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: str= kwargs.pop('''labels''' , lowerCAmelCase )
        # Positional fallback: first positional argument is the input features.
        if len(lowerCAmelCase ) > 0:
            SCREAMING_SNAKE_CASE__: Optional[int]= args[0]
            SCREAMING_SNAKE_CASE__: Optional[int]= args[1:]
        if input_features is not None:
            SCREAMING_SNAKE_CASE__: int= self.feature_extractor.pad(lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase )
        if labels is not None:
            SCREAMING_SNAKE_CASE__: List[Any]= self.tokenizer.pad(lowerCAmelCase , **lowerCAmelCase )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            SCREAMING_SNAKE_CASE__: Tuple= labels['''input_ids''']
            return input_features
    def UpperCamelCase_ ( self , *lowerCAmelCase , **lowerCAmelCase ) -> int:
        # Thin delegate to the tokenizer's decode.
        return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
    @contextmanager
    def UpperCamelCase_ ( self ) -> str:
        # Deprecated target-context manager: temporarily makes the tokenizer the
        # "current" processor so __call__/pad operate on labels, then restores
        # the feature extractor on exit.
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= True
        SCREAMING_SNAKE_CASE__: List[Any]= self.tokenizer
        yield
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extractor
        SCREAMING_SNAKE_CASE__: List[str]= False
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase_ : List[Any] = logging.get_logger(__name__)
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
    """PyTorch-specific benchmark arguments (extends the shared benchmark args base).

    NOTE(review): locals below are bound to the placeholder
    ``SCREAMING_SNAKE_CASE__`` while the surrounding code reads the intended
    names (``positive_arg``, ``device``, ``n_gpu`` ...), and all dataclass
    fields share the placeholder name ``__a`` (later ones shadow earlier
    ones) — the class cannot run as written; comments describe intent.
    """
    # Legacy negated `no_*` flags accepted for backward compatibility in __init__.
    __a = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__( self , **lowerCAmelCase ) -> str:
        # Translate each deprecated `no_X` kwarg into its positive counterpart
        # (inverted boolean), warn the caller, then forward the remaining
        # kwargs to the base dataclass.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                SCREAMING_SNAKE_CASE__: str= deprecated_arg[3:]
                setattr(self , lowerCAmelCase , not kwargs.pop(lowerCAmelCase ) )
                logger.warning(
                    f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}' )
        SCREAMING_SNAKE_CASE__: Tuple= kwargs.pop('''torchscript''' , self.torchscript )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        SCREAMING_SNAKE_CASE__: Any= kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
        super().__init__(**lowerCAmelCase )
    # Dataclass fields: torchscript tracing flag, TPU-metrics flag, AMP level.
    __a = field(default=UpperCamelCase_ , metadata={"help": "Trace the models using torchscript"} )
    __a = field(default=UpperCamelCase_ , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
    __a = field(
        default="O1" , metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        } , )
    @cached_property
    def UpperCamelCase_ ( self ) -> Tuple["torch.device", int]:
        # Resolve (device, gpu count) once per instance: CPU when CUDA is
        # disabled, XLA device on TPU, otherwise CUDA if available.
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            SCREAMING_SNAKE_CASE__: Any= torch.device('''cpu''' )
            SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
        elif is_torch_tpu_available():
            SCREAMING_SNAKE_CASE__: List[str]= xm.xla_device()
            SCREAMING_SNAKE_CASE__: Any= 0
        else:
            SCREAMING_SNAKE_CASE__: List[Any]= torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            SCREAMING_SNAKE_CASE__: List[str]= torch.cuda.device_count()
        return device, n_gpu
    @property
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # True only when a TPU runtime is importable and TPU use is requested.
        return is_torch_tpu_available() and self.tpu
    @property
    def UpperCamelCase_ ( self ) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def UpperCamelCase_ ( self ) -> "torch.device":
        # Device half of the cached (device, n_gpu) pair.
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]
    @property
    def UpperCamelCase_ ( self ) -> int:
        # GPU-count half of the cached (device, n_gpu) pair.
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]
    @property
    def UpperCamelCase_ ( self ) -> str:
        # Whether at least one GPU will be used.
        return self.n_gpu > 0
| 64 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)
# Lazy import surface for the DeBERTa model family.  Each optional backend
# (tokenizers / torch / tf) registers its symbols only when importable, so a
# missing dependency degrades gracefully instead of breaking import.
# NOTE(review): every structure assignment below is bound to the placeholder
# name `lowercase_` but consumed as `_import_structure` in the _LazyModule
# call at the bottom — the module cannot run as written.
lowercase_ : List[Any] = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}
# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Tuple = ['DebertaTokenizerFast']
# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Union[str, Any] = [
        'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DebertaForMaskedLM',
        'DebertaForQuestionAnswering',
        'DebertaForSequenceClassification',
        'DebertaForTokenClassification',
        'DebertaModel',
        'DebertaPreTrainedModel',
    ]
# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Tuple = [
        'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDebertaForMaskedLM',
        'TFDebertaForQuestionAnswering',
        'TFDebertaForSequenceClassification',
        'TFDebertaForTokenClassification',
        'TFDebertaModel',
        'TFDebertaPreTrainedModel',
    ]
# Static imports for type checkers only; at runtime the module object is
# replaced with a _LazyModule that resolves attributes on first access.
if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )
else:
    import sys
    lowercase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0.9 , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , ) -> str:
SCREAMING_SNAKE_CASE__: List[str]= size if size is not None else {'''shortest_edge''': 30}
SCREAMING_SNAKE_CASE__: Any= crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
SCREAMING_SNAKE_CASE__: Dict= parent
SCREAMING_SNAKE_CASE__: List[str]= batch_size
SCREAMING_SNAKE_CASE__: int= num_channels
SCREAMING_SNAKE_CASE__: int= min_resolution
SCREAMING_SNAKE_CASE__: List[Any]= max_resolution
SCREAMING_SNAKE_CASE__: List[str]= do_resize_and_center_crop
SCREAMING_SNAKE_CASE__: Union[str, Any]= size
SCREAMING_SNAKE_CASE__: Dict= crop_pct
SCREAMING_SNAKE_CASE__: Optional[int]= crop_size
SCREAMING_SNAKE_CASE__: Dict= do_normalize
SCREAMING_SNAKE_CASE__: List[str]= image_mean
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_std
def UpperCamelCase_ ( self ) -> Tuple:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """Unit tests for PoolFormerImageProcessor over PIL / numpy / torch inputs.

    NOTE(review): locals are bound to the placeholder ``SCREAMING_SNAKE_CASE__``
    but read under intended names (``image_processor``, ``image_inputs``,
    ``encoded_images`` ...), and many arguments are the placeholder
    ``lowerCAmelCase`` — the tests cannot run as written; comments describe
    the evident intent.
    """
    # Class under test; None when the vision dependencies are missing.
    __a = PoolFormerImageProcessor if is_vision_available() else None
    def UpperCamelCase_ ( self ) -> List[Any]:
        # setUp: build the shared configuration fixture.
        SCREAMING_SNAKE_CASE__: Any= PoolFormerImageProcessingTester(self )
    @property
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Kwargs dict used to construct the processor in each test.
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCamelCase_ ( self ) -> Dict:
        # The processor must expose every configuration attribute.
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase , '''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''crop_pct''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )
    def UpperCamelCase_ ( self ) -> Tuple:
        # from_dict must honour the defaults and explicit size/crop overrides.
        SCREAMING_SNAKE_CASE__: Any= self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
        self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
        SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def UpperCamelCase_ ( self ) -> Tuple:
        # Intentionally empty override (slot kept for mixin interface parity).
        pass
    def UpperCamelCase_ ( self ) -> Optional[int]:
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Dict= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def UpperCamelCase_ ( self ) -> Dict:
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: List[Any]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def UpperCamelCase_ ( self ) -> int:
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: List[Any]= self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE__: Any= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 64 | 1 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
lowercase_ : int = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_( state_dict ):
    """Drop Whisper bookkeeping entries from ``state_dict`` in place.

    The original bound the key list to a placeholder and popped undefined
    names; restored so the function mutates its argument.  Name restored from
    the call site in the conversion function below.

    Args:
        state_dict: mutable mapping of checkpoint tensor names.
    """
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        # Missing keys are ignored (None default) so the call is idempotent.
        state_dict.pop(k , None )
# Key-fragment renames applied by rename_keys() below to translate OpenAI
# Whisper state-dict names into Transformers module names.
# NOTE(review): bound to the placeholder `lowercase_` but consumed as
# `WHISPER_MAPPING` in rename_keys() — evident intent: WHISPER_MAPPING = {...}.
lowercase_ : Tuple = {
    'blocks': 'layers',
    'mlp.0': 'fc1',
    'mlp.2': 'fc2',
    'mlp_ln': 'final_layer_norm',
    '.attn.query': '.self_attn.q_proj',
    '.attn.key': '.self_attn.k_proj',
    '.attn.value': '.self_attn.v_proj',
    '.attn_ln': '.self_attn_layer_norm',
    '.attn.out': '.self_attn.out_proj',
    '.cross_attn.query': '.encoder_attn.q_proj',
    '.cross_attn.key': '.encoder_attn.k_proj',
    '.cross_attn.value': '.encoder_attn.v_proj',
    '.cross_attn_ln': '.encoder_attn_layer_norm',
    '.cross_attn.out': '.encoder_attn.out_proj',
    'decoder.ln.': 'decoder.layer_norm.',
    'encoder.ln.': 'encoder.layer_norm.',
    'token_embedding': 'embed_tokens',
    'encoder.positional_embedding': 'encoder.embed_positions.weight',
    'decoder.positional_embedding': 'decoder.embed_positions.weight',
    'ln_post': 'layer_norm',
}
def rename_keys( s_dict ):
    """Rename OpenAI-style keys in ``s_dict`` to Transformers names, in place.

    Each key fragment listed in WHISPER_MAPPING is substituted; the tensor is
    re-inserted under the rewritten key.  The original read ``new_key`` before
    binding it and discarded the popped value; bindings restored from the
    reads.  Name restored from the call site in the conversion function below.

    Returns:
        The same (mutated) mapping, for convenience.
    """
    # Snapshot the keys: we mutate the dict while iterating.
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(F'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb( emb ):
    """Build a bias-free Linear layer sharing its weight with an embedding.

    Used to create the output projection tied to the decoder token embedding.
    The original read ``emb``/``lin_layer`` without binding them; restored.
    Name restored from the call site in the conversion function below.

    Args:
        emb: an ``nn.Embedding`` whose weight is (vocab_size, emb_size).

    Returns:
        ``nn.Linear`` whose ``weight.data`` is the embedding's weight tensor.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Share storage with the embedding so the projection stays tied.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def A__ ( snake_case_ : str , snake_case_ : str ):
    """Download a checkpoint URL into a local cache directory, verifying SHA-256.

    NOTE(review): both parameters are the placeholder ``snake_case_`` (a
    duplicate-parameter SyntaxError) and locals are bound to
    ``SCREAMING_SNAKE_CASE__`` but read as ``url`` / ``download_target`` /
    ``expected_shaaaa`` / ``model_bytes`` / ``buffer`` — evident intent is
    ``_download(url, root)``.  ``hashlib.shaaaa`` is presumably the obfuscated
    ``hashlib.sha256`` — confirm against upstream.
    """
    os.makedirs(snake_case_ , exist_ok=snake_case_ )
    SCREAMING_SNAKE_CASE__: Tuple= os.path.basename(snake_case_ )
    # The expected SHA-256 is the second-to-last path segment of the URL.
    SCREAMING_SNAKE_CASE__: Optional[Any]= url.split('''/''' )[-2]
    SCREAMING_SNAKE_CASE__: List[Any]= os.path.join(snake_case_ , snake_case_ )
    if os.path.exists(snake_case_ ) and not os.path.isfile(snake_case_ ):
        raise RuntimeError(F'{download_target} exists and is not a regular file' )
    # Reuse a cached download when its checksum still matches.
    if os.path.isfile(snake_case_ ):
        SCREAMING_SNAKE_CASE__: Tuple= open(snake_case_ , '''rb''' ).read()
        if hashlib.shaaaa(snake_case_ ).hexdigest() == expected_shaaaa:
            return model_bytes
        else:
            warnings.warn(F'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )
    # Stream the download in 8 KiB chunks with a tqdm progress bar.
    with urllib.request.urlopen(snake_case_ ) as source, open(snake_case_ , '''wb''' ) as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=snake_case_ , unit_divisor=1_024 ) as loop:
            while True:
                SCREAMING_SNAKE_CASE__: List[str]= source.read(8_192 )
                if not buffer:
                    break
                output.write(snake_case_ )
                loop.update(len(snake_case_ ) )
    # Verify the fresh download before returning its bytes.
    SCREAMING_SNAKE_CASE__: Optional[Any]= open(snake_case_ , '''rb''' ).read()
    if hashlib.shaaaa(snake_case_ ).hexdigest() != expected_shaaaa:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' )
    return model_bytes
def A__ ( snake_case_ : Optional[Any] , snake_case_ : int ):
    """Convert an OpenAI Whisper checkpoint into WhisperForConditionalGeneration.

    NOTE(review): parameters are duplicate placeholders (evident intent:
    ``(checkpoint_path, pytorch_dump_folder_path)``; the `__main__` block
    calls this as ``convert_openai_whisper_to_tfms``) and locals are bound to
    ``SCREAMING_SNAKE_CASE__`` but read as ``original_checkpoint``,
    ``dimensions``, ``state_dict``, ``model``, ``missing``, ``tie_embeds``,
    ``proj_out_weights`` — the function cannot run as written.
    """
    # Accept either a model alias (downloaded via _MODELS) or a local .pt path.
    if ".pt" not in checkpoint_path:
        SCREAMING_SNAKE_CASE__: Dict= _download(_MODELS[checkpoint_path] )
    else:
        SCREAMING_SNAKE_CASE__: Tuple= torch.load(snake_case_ , map_location='''cpu''' )
    SCREAMING_SNAKE_CASE__: str= original_checkpoint['''dims''']
    SCREAMING_SNAKE_CASE__: Optional[Any]= original_checkpoint['''model_state_dict''']
    # Keep the decoder token embedding for the (possibly tied) output projection.
    SCREAMING_SNAKE_CASE__: Any= state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(snake_case_ )
    rename_keys(snake_case_ )
    SCREAMING_SNAKE_CASE__: Optional[Any]= True
    # FFN width inferred from the first decoder layer's fc1 weight.
    SCREAMING_SNAKE_CASE__: Optional[int]= state_dict['''decoder.layers.0.fc1.weight'''].shape[0]
    # NOTE(review): decoder_attention_heads is taken from 'n_text_state' below;
    # that looks like a transcription error for 'n_text_head' — confirm.
    SCREAMING_SNAKE_CASE__: Dict= WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=snake_case_ , decoder_ffn_dim=snake_case_ , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )
    SCREAMING_SNAKE_CASE__: str= WhisperForConditionalGeneration(snake_case_ )
    SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= model.model.load_state_dict(snake_case_ , strict=snake_case_ )
    # Only the positional-embedding buffers may legitimately be missing.
    if len(snake_case_ ) > 0 and not set(snake_case_ ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            F' but all the following weights are missing {missing}' )
    # Tie the LM head to the embeddings, or fall back to the saved projection.
    if tie_embeds:
        SCREAMING_SNAKE_CASE__: List[Any]= make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        SCREAMING_SNAKE_CASE__: str= proj_out_weights
    model.save_pretrained(snake_case_ )
if __name__ == "__main__":
    # CLI: convert a Whisper checkpoint (alias or local .pt path) to the
    # Transformers format.  The original bound the parser and parsed args to
    # the placeholder `lowercase_` while reading `parser`/`args`; restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ : Tuple = 3
def primitive_root( p_val ):
    """Pick a random primitive-root candidate modulo ``p_val``.

    The original bound the candidate to a placeholder and returned the
    never-defined ``g``; the binding is restored.  Name restored from the
    call site in the key-generation function below.

    Args:
        p_val: the prime modulus.

    Returns:
        A candidate g in [3, p_val) passing both screening tests below.
    """
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , p_val )
        # Screening tests from the original: reject g with g^2 ≡ 1 (mod p)
        # or g^p ≡ 1 (mod p).
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key( key_size ):
    """Generate an ElGamal key pair of the given bit size.

    Locals in the original were bound to a placeholder while ``key_size``,
    ``e_a``, ``d`` and ``p`` were read; bindings restored.  Name restored from
    the call site in make_key_files below.

    Returns:
        (public_key, private_key) where public_key = (key_size, e_1, e_2, p)
        and private_key = (key_size, d).
    """
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files( name , key_size ):
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``, refusing to overwrite.

    The original never bound ``public_key``/``private_key`` (placeholder
    targets); bindings restored.  Name restored from the call site in main().

    Args:
        name: file-name prefix for the two key files.
        key_size: key size in bits, forwarded to generate_key.
    """
    # Refuse to clobber existing key material.
    if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
        print('''\nWARNING:''' )
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(F'\nWriting public key to file {name}_pubkey.txt...' )
    with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
        fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
    print(F'Writing private key to file {name}_privkey.txt...' )
    with open(F'{name}_privkey.txt' , '''w''' ) as fo:
        fo.write(F'{private_key[0]},{private_key[1]}' )
def main():
    """CLI entry point: generate the demo 2048-bit ElGamal key files.

    Name restored from the ``main()`` call in the ``__main__`` guard below.
    """
    print('''Making key files...''' )
    make_key_files('''elgamal''' , 2_048 )
    print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 64 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase_ : int = get_tests_dir('fixtures')
lowercase_ : Dict = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowercase_ : Union[str, Any] = get_tests_dir('fixtures/dummy-config.json')
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Dict= 0
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Optional[int]= AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Tuple= AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__: List[Any]= WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
SCREAMING_SNAKE_CASE__: Any= AutoFeatureExtractor.from_pretrained(lowerCAmelCase ).to_dict()
config_dict.pop('''feature_extractor_type''' )
SCREAMING_SNAKE_CASE__: Dict= WavaVecaFeatureExtractor(**lowerCAmelCase )
# save in new folder
model_config.save_pretrained(lowerCAmelCase )
config.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE__: Optional[int]= json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__: Optional[int]= AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Dict:
with self.assertRaisesRegex(
lowerCAmelCase , '''bert-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__: str= AutoFeatureExtractor.from_pretrained('''bert-base''' )
def UpperCamelCase_ ( self ) -> Any:
with self.assertRaisesRegex(
lowerCAmelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__: Any= AutoFeatureExtractor.from_pretrained(lowerCAmelCase , revision='''aaaaaa''' )
def UpperCamelCase_ ( self ) -> int:
with self.assertRaisesRegex(
lowerCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE__: Tuple= AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def UpperCamelCase_ ( self ) -> List[Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Optional[Any]= AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase ):
SCREAMING_SNAKE_CASE__: Optional[int]= AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCAmelCase )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= AutoFeatureExtractor.from_pretrained(lowerCAmelCase , trust_remote_code=lowerCAmelCase )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
def UpperCamelCase_ ( self ) -> Any:
try:
AutoConfig.register('''custom''' , lowerCAmelCase )
AutoFeatureExtractor.register(lowerCAmelCase , lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoFeatureExtractor.register(lowerCAmelCase , lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE__: Optional[int]= CustomFeatureExtractor.from_pretrained(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def UpperCamelCase_ ( self ) -> Optional[int]:
        """A locally registered extractor is preferred over Hub remote code unless
        ``trust_remote_code`` explicitly enables the remote one."""
        class _lowerCamelCase ( UpperCamelCase_ ):
            # Marker attribute; the assertions below check ``is_local`` to tell
            # whether the local class (not the Hub one) was instantiated.
            __a = True
        try:
            AutoConfig.register('''custom''' , lowerCAmelCase )
            AutoFeatureExtractor.register(lowerCAmelCase , lowerCAmelCase )
            # If remote code is not set, the default is to use local
            SCREAMING_SNAKE_CASE__: Union[str, Any]= AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            SCREAMING_SNAKE_CASE__: int= AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCAmelCase )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            SCREAMING_SNAKE_CASE__: Any= AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=lowerCAmelCase )
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
            self.assertTrue(not hasattr(lowerCAmelCase , '''is_local''' ) )
        finally:
            # Clean up the registration regardless of the outcome above.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 64 | from math import factorial
def A__ ( n : int , k : int ):
    """Return the binomial coefficient "n choose k".

    Args:
        n: total number of items (must satisfy ``n >= k``).
        k: number of items selected (must be non-negative).

    Returns:
        The number of ways to choose ``k`` items from ``n``.

    Raises:
        ValueError: if ``k`` is negative or greater than ``n``.
    """
    # BUG FIX: both parameters were mangled to the same name (a SyntaxError)
    # and the body referenced undefined ``n``/``k``; proper names restored.
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    # Classic formula n! / (k! * (n - k)!), kept in exact integer arithmetic.
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
    # Demonstrate the binomial-coefficient helper with classic counting
    # problems. BUG FIX: the calls referenced ``combinations``, which does not
    # exist in this file — the function above is (mangled to) ``A__``.
    print(
        'The number of five-card hands possible from a standard',
        f'''fifty-two card deck is: {A__(5_2, 5)}\n''',
    )
    print(
        'If a class of 40 students must be arranged into groups of',
        f'''4 for group projects, there are {A__(4_0, 4)} ways''',
        'to arrange them.\n',
    )
    print(
        'If 10 teams are competing in a Formula One race, there',
        f'''are {A__(1_0, 3)} ways that first, second and''',
        'third place can be awarded.',
    )
| 64 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    """Nightly integration test of the legacy ONNX Stable Diffusion inpainting
    pipeline, executed on GPU through onnxruntime's CUDA provider."""
    @property
    def UpperCamelCase_ ( self ) -> List[str]:
        # onnxruntime provider spec: CUDA with a capped memory arena so the
        # test does not claim the whole GPU.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def UpperCamelCase_ ( self ) -> List[Any]:
        # NOTE(review): the flag below is assigned to a throwaway name and
        # ``options`` is undefined here — presumably this set
        # ``options.enable_mem_pattern = False`` originally; confirm.
        SCREAMING_SNAKE_CASE__: Dict= ort.SessionOptions()
        SCREAMING_SNAKE_CASE__: List[str]= False
        return options
    def UpperCamelCase_ ( self ) -> int:
        """End-to-end inpainting run compared against a stored reference image."""
        # Source photo, inpainting mask, and expected output (numpy golden file).
        SCREAMING_SNAKE_CASE__: Dict= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        SCREAMING_SNAKE_CASE__: int= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        SCREAMING_SNAKE_CASE__: Tuple= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
        # using the PNDM scheduler by default
        SCREAMING_SNAKE_CASE__: Tuple= OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= '''A red cat sitting on a park bench'''
        # Seeded numpy RNG makes the diffusion run reproducible.
        SCREAMING_SNAKE_CASE__: Optional[Any]= np.random.RandomState(0 )
        SCREAMING_SNAKE_CASE__: Any= pipe(
            prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=lowerCAmelCase , output_type='''np''' , )
        SCREAMING_SNAKE_CASE__: Any= output.images[0]
        # 512x512 RGB output must match the golden file within 1e-2.
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
| 64 | import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Module-level RNG shared by the synthetic-input helper below (presumably
# bound as ``global_rng`` in the original source — the helper reads that name).
lowercase_ : Dict = random.Random()
if is_torch_available():
    import torch
def A__ ( shape , scale=1.0 , rng=None , name=None ):
    """Create a nested list of random floats with the given 2-D ``shape``.

    Args:
        shape: ``(rows, cols)`` pair giving the dimensions to generate.
        scale: multiplier applied to each uniform [0, 1) sample.
        rng: optional ``random.Random``-like source; falls back to the
            module-level ``global_rng`` when None.
        name: unused; kept for call-site compatibility.

    Returns:
        A list of ``rows`` lists, each holding ``cols`` floats.
    """
    # BUG FIX: the parameters were all mangled to one duplicated name (a
    # SyntaxError) and the global-RNG fallback was assigned to a throwaway
    # variable, leaving ``rng`` as None.
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class _lowerCamelCase ( unittest.TestCase ):
    """Holds the hyper-parameters and synthetic inputs shared by the AST
    feature-extraction tests below.

    NOTE(review): constructor parameter names are mangled; judging by the body
    they are (parent, batch_size=7, min_seq_length=400, max_seq_length=2000,
    feature_size=1, padding_value=0.0, sampling_rate=16000,
    return_attention_mask=True, do_normalize=True) — confirm.
    """
    def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=1 , lowerCAmelCase=0.0 , lowerCAmelCase=16000 , lowerCAmelCase=True , lowerCAmelCase=True , ) -> List[str]:
        SCREAMING_SNAKE_CASE__: Optional[Any]= parent
        SCREAMING_SNAKE_CASE__: Dict= batch_size
        SCREAMING_SNAKE_CASE__: Optional[int]= min_seq_length
        SCREAMING_SNAKE_CASE__: Dict= max_seq_length
        # Length step between consecutive batch items so sizes differ.
        SCREAMING_SNAKE_CASE__: Optional[Any]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        SCREAMING_SNAKE_CASE__: Dict= feature_size
        SCREAMING_SNAKE_CASE__: str= padding_value
        SCREAMING_SNAKE_CASE__: Dict= sampling_rate
        SCREAMING_SNAKE_CASE__: List[str]= return_attention_mask
        SCREAMING_SNAKE_CASE__: str= do_normalize
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        # Keyword arguments used to construct the feature extractor under test.
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Dict:
        """Build a batch of synthetic float speech inputs; the mangled boolean
        flags presumably select equal-length inputs and numpy conversion."""
        def _flatten(lowerCAmelCase ):
            return list(itertools.chain(*lowerCAmelCase ) )
        if equal_length:
            SCREAMING_SNAKE_CASE__: int= floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            SCREAMING_SNAKE_CASE__: int= [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """AST feature-extractor tests: list/numpy batching equivalence, output
    dtypes after padding, and a golden-value check on a LibriSpeech sample."""
    # Feature-extractor class exercised by the shared sequence mixin.
    __a = ASTFeatureExtractor
    def UpperCamelCase_ ( self ) -> int:
        # NOTE(review): ``ASTFeatureExtractionTester`` appears to be the tester
        # class defined above under a mangled name — confirm.
        SCREAMING_SNAKE_CASE__: List[Any]= ASTFeatureExtractionTester(self )
    def UpperCamelCase_ ( self ) -> Any:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        SCREAMING_SNAKE_CASE__: Optional[int]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        SCREAMING_SNAKE_CASE__: Dict= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test batched
        SCREAMING_SNAKE_CASE__: Tuple= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
        SCREAMING_SNAKE_CASE__: List[Any]= np.asarray(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
    @require_torch
    def UpperCamelCase_ ( self ) -> Dict:
        # Padding must yield float32 results for both numpy and torch tensors.
        import torch
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        SCREAMING_SNAKE_CASE__: List[str]= np.random.rand(100 ).astype(np.floataa )
        SCREAMING_SNAKE_CASE__: Optional[Any]= np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
        """Load the first N decoded audio arrays from the dummy LibriSpeech
        validation split (the mangled parameter is presumably num_samples)."""
        from datasets import load_dataset
        SCREAMING_SNAKE_CASE__: Optional[int]= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        SCREAMING_SNAKE_CASE__: Dict= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    @require_torch
    def UpperCamelCase_ ( self ) -> str:
        # Golden-value check: first 30 features of the first frame of a real
        # sample must match the hard-coded reference within 1e-4.
        # fmt: off
        SCREAMING_SNAKE_CASE__: str= torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
            -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
            -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
            -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 )
        SCREAMING_SNAKE_CASE__: Tuple= ASTFeatureExtractor()
        SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
| 64 | 1 |
from __future__ import annotations
def A__ ( target : str , word_bank : list[str] | None = None ):
    """Return every way ``target`` can be built by concatenating words from
    ``word_bank`` (each word may be reused any number of times).

    Args:
        target: the string to construct.
        word_bank: candidate words; treated as empty when None.

    Returns:
        A list of word lists; each inner list concatenates to ``target``.
    """
    # BUG FIX: both parameters were mangled to one duplicated name (a
    # SyntaxError) and the body referenced undefined ``target``/``word_bank``.
    word_bank = word_bank or []
    # table[i] holds every construction of the prefix target[:i]; words are
    # prepended while filling and reversed into reading order at the end.
    table_size: int = len(target ) + 1
    table: list[list[list[str]]] = [[] for _ in range(table_size )]
    # seed value: the empty prefix has exactly one (empty) construction
    table[0] = [[]]
    for i in range(table_size ):
        if table[i] != []:
            for word in word_bank:
                # slice condition: does this word extend the prefix at i?
                if target[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # push the extended combinations to the position they reach
                    table[i + len(word )] += new_combinations
    # combinations were built in reverse order; flip each for readable output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
    # Smoke-test the all-construct helper. BUG FIX: the calls referenced
    # ``all_construct``, which does not exist in this file — the function
    # above is (mangled to) ``A__``.
    print(A__('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
    print(A__('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
    print(
        A__(
            'hexagonosaurus',
            ['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
        )
    )
| 64 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: submodule name -> list of public symbols. Optional
# backends (sentencepiece / torchaudio / TF / PyTorch) are added only when
# their dependency is importable.
# BUG FIX: each availability branch previously rebound a single throwaway
# module-level name, and ``_import_structure`` was passed to _LazyModule at
# the bottom without ever being defined (NameError on import).
_import_structure = {
    'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
    'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_speech_to_text'] = ['Speech2TextTokenizer']
try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_speech_to_text'] = ['Speech2TextFeatureExtractor']
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_speech_to_text'] = [
        'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFSpeech2TextForConditionalGeneration',
        'TFSpeech2TextModel',
        'TFSpeech2TextPreTrainedModel',
    ]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_speech_to_text'] = [
        'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Speech2TextForConditionalGeneration',
        'Speech2TextModel',
        'Speech2TextPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Static type-checkers see the real imports; at runtime they stay lazy.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer
    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (transformers-style helper keyed on the module name).
lowercase_ : List[Any] = logging.get_logger(__name__)
# Map from model identifier to its hosted config.json, used by
# from_pretrained-style lookups for the XLM-RoBERTa checkpoints.
lowercase_ : Any = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class _lowerCamelCase ( UpperCamelCase_ ):
    """Configuration class for XLM-RoBERTa models (BERT-style hyper-parameters).

    BUG FIX: every constructor parameter was mangled to the same duplicated
    name (a SyntaxError) and the values were assigned to throwaway locals
    instead of attributes. Parameter names are restored from the original
    assignment order and the values are stored on ``self``.
    """
    __a = "xlm-roberta"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        # Forward the special-token ids (plus any extra kwargs) to the base config.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowerCamelCase ( UpperCamelCase_ ):
    """ONNX export configuration: declares the model inputs and their
    dynamic (variable-size) axes."""
    @property
    def UpperCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
        # BUG FIX: the axes mapping was assigned to a throwaway name, leaving
        # ``dynamic_axis`` undefined at the return (NameError at runtime).
        if self.task == "multiple-choice":
            # Multiple-choice inputs carry an extra "choice" dimension.
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 64 | import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A__ ( ):
    """Build and parse the command-line arguments for the image-generation
    script.

    Returns:
        The parsed ``argparse.Namespace`` with model path, caption, image
        count, seed, and CUDA device id.

    BUG FIX: the ``type``/``default``/``required`` values were mangled to an
    undefined name; restored to the types implied by the defaults and help
    text (strings for model/caption, ints for count/seed/device).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''' , '''--pretrained_model_name_or_path''' , type=str , default=None , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
    parser.add_argument(
        '''-c''' , '''--caption''' , type=str , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
    parser.add_argument(
        '''-n''' , '''--images_num''' , type=int , default=4 , help='''How much images to generate.''' , )
    parser.add_argument(
        '''-s''' , '''--seed''' , type=int , default=42 , help='''Seed for random process.''' , )
    parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=int , default=0 , help='''cuda_id.''' , )
    return parser.parse_args()
def A__ ( imgs , rows , cols ):
    """Paste ``rows * cols`` PIL images into a single grid image.

    Args:
        imgs: sequence of equally sized PIL images (exactly rows*cols of them).
        rows: number of grid rows.
        cols: number of grid columns.

    Returns:
        A new RGB PIL image containing the grid.

    Raises:
        ValueError: when ``len(imgs)`` does not equal ``rows * cols``.
    """
    # BUG FIX: all three parameters were mangled to one duplicated name
    # (a SyntaxError); the body references restore the intended names.
    if not len(imgs ) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''' )
    # All cells share the size of the first image.
    w, h = imgs[0].size
    grid = Image.new('''RGB''' , size=(cols * w, rows * h) )
    for i, img in enumerate(imgs ):
        # Row-major placement: column = i % cols, row = i // cols.
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def A__ ( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    """Generate images with the given diffusion pipeline and arrange them in
    a square-ish grid.

    Args:
        pipeline: a StableDiffusion-style pipeline (callable, has ``.device``).
        prompt: text prompt for generation.
        guidance_scale: classifier-free guidance strength.
        num_inference_steps: number of denoising steps.
        num_images_per_prompt: how many images to produce.
        seed: RNG seed for reproducible sampling.

    Returns:
        ``(grid, images)`` — the composite grid image and the list of images.

    BUG FIX: every parameter was mangled to one duplicated name (a
    SyntaxError); names restored from the call sites in the body.
    NOTE(review): ``image_grid`` is defined above in this mangled file as
    ``A__`` — the original helper name is kept here; confirm the binding.
    """
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
# NOTE(review): in this mangled copy the helper functions above are all named
# ``A__``, so the module-level calls to ``parse_args`` / ``generate_images``
# below would raise NameError at runtime, and names like ``args`` / ``unet`` /
# ``pipeline`` are read but only ever assigned to throwaway ``lowercase_``
# bindings — confirm against the original script before running.
lowercase_ : List[str] = parse_args()
# Load models and create wrapper for stable diffusion
lowercase_ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowercase_ : List[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowercase_ : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowercase_ : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowercase_ : Dict = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker: always return the images unchanged.
lowercase_ : str = lambda images, clip_input: (images, False)
# Prefer an INC-quantized UNet checkpoint when one exists next to the model;
# otherwise move the stock UNet to the requested CUDA device.
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    lowercase_ : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    lowercase_ : Any = unet.to(torch.device('cuda', args.cuda_id))
lowercase_ : str = pipeline.to(unet.device)
lowercase_ , lowercase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the composite grid plus each individual image under a directory
# derived from the caption text.
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowercase_ : List[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 64 | 1 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase :
    """Test helper that builds NezhaConfig instances and synthetic input
    batches, and asserts output shapes for every Nezha head.

    NOTE(review): constructor parameter names are mangled; the assignment
    order below shows the intended names (parent, batch_size, seq_length,
    is_training, use_input_mask, use_token_type_ids, use_labels, vocab_size,
    hidden_size, num_hidden_layers, num_attention_heads, intermediate_size,
    hidden_act, hidden_dropout_prob, attention_probs_dropout_prob,
    max_position_embeddings, type_vocab_size, type_sequence_label_size,
    initializer_range, num_labels, num_choices, scope) — confirm.
    """
    def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=128 , lowerCAmelCase=32 , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE__: Union[str, Any]= parent
        SCREAMING_SNAKE_CASE__: Union[str, Any]= batch_size
        SCREAMING_SNAKE_CASE__: Any= seq_length
        SCREAMING_SNAKE_CASE__: Optional[int]= is_training
        SCREAMING_SNAKE_CASE__: List[Any]= use_input_mask
        SCREAMING_SNAKE_CASE__: Dict= use_token_type_ids
        SCREAMING_SNAKE_CASE__: Any= use_labels
        SCREAMING_SNAKE_CASE__: Optional[Any]= vocab_size
        SCREAMING_SNAKE_CASE__: Union[str, Any]= hidden_size
        SCREAMING_SNAKE_CASE__: Any= num_hidden_layers
        SCREAMING_SNAKE_CASE__: List[Any]= num_attention_heads
        SCREAMING_SNAKE_CASE__: List[str]= intermediate_size
        SCREAMING_SNAKE_CASE__: str= hidden_act
        SCREAMING_SNAKE_CASE__: str= hidden_dropout_prob
        SCREAMING_SNAKE_CASE__: Dict= attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__: Dict= max_position_embeddings
        SCREAMING_SNAKE_CASE__: Optional[Any]= type_vocab_size
        SCREAMING_SNAKE_CASE__: Optional[Any]= type_sequence_label_size
        SCREAMING_SNAKE_CASE__: List[str]= initializer_range
        SCREAMING_SNAKE_CASE__: Optional[Any]= num_labels
        SCREAMING_SNAKE_CASE__: List[str]= num_choices
        SCREAMING_SNAKE_CASE__: Optional[int]= scope
    # Build random ids/masks/labels plus a config for the standard test path.
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        SCREAMING_SNAKE_CASE__: int= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE__: Optional[int]= None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE__: List[Any]= random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE__: Optional[int]= None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE__: Optional[int]= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE__: Optional[Any]= None
        SCREAMING_SNAKE_CASE__: List[Any]= None
        SCREAMING_SNAKE_CASE__: int= None
        if self.use_labels:
            SCREAMING_SNAKE_CASE__: List[str]= ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE__: int= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE__: Union[str, Any]= ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Construct a NezhaConfig from the stored hyper-parameters.
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
    # Same as above but adds encoder hidden states/mask for the decoder path.
    # NOTE(review): the parenthesized annotated-tuple unpack below is a
    # mangling artifact (annotated assignment to a tuple target is invalid
    # Python) — confirm against the original test.
    def UpperCamelCase_ ( self ) -> str:
        (
            (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ),
        ): int= self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__: str= True
        SCREAMING_SNAKE_CASE__: Any= floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    # Base model: hidden-state and pooler output shapes, with/without masks.
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> str:
        SCREAMING_SNAKE_CASE__: List[Any]= NezhaModel(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: List[Any]= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= model(lowerCAmelCase , token_type_ids=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= model(lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    # Base model used as a decoder with cross-attention inputs.
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> List[Any]:
        SCREAMING_SNAKE_CASE__: Optional[int]= True
        SCREAMING_SNAKE_CASE__: Optional[int]= NezhaModel(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: List[Any]= model(
            lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , )
        SCREAMING_SNAKE_CASE__: Optional[int]= model(
            lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , )
        SCREAMING_SNAKE_CASE__: List[str]= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    # Masked-LM head: logits over the vocabulary per position.
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
        SCREAMING_SNAKE_CASE__: str= NezhaForMaskedLM(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: Dict= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Next-sentence-prediction head: binary logits per example.
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE__: str= NezhaForNextSentencePrediction(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: int= model(
            lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    # Pretraining head: MLM logits plus seq-relationship logits.
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Any:
        SCREAMING_SNAKE_CASE__: Tuple= NezhaForPreTraining(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: List[Any]= model(
            lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , next_sentence_label=lowerCAmelCase , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    # Question-answering head: per-position start/end logits.
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Dict:
        SCREAMING_SNAKE_CASE__: Any= NezhaForQuestionAnswering(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: Optional[Any]= model(
            lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Sequence-classification head: one logit vector per example.
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.num_labels
        SCREAMING_SNAKE_CASE__: Tuple= NezhaForSequenceClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: Any= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Token-classification head: one logit vector per token.
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
        SCREAMING_SNAKE_CASE__: int= self.num_labels
        SCREAMING_SNAKE_CASE__: str= NezhaForTokenClassification(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: int= model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Multiple-choice head: inputs expanded with a choices dimension.
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
        SCREAMING_SNAKE_CASE__: int= self.num_choices
        SCREAMING_SNAKE_CASE__: List[Any]= NezhaForMultipleChoice(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: List[str]= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE__: Optional[int]= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE__: str= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= model(
            lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Repackage prepared inputs as the kwargs dict used by the common tests.
    def UpperCamelCase_ ( self ) -> List[Any]:
        SCREAMING_SNAKE_CASE__: List[Any]= self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ), (
                SCREAMING_SNAKE_CASE__
            ),
        ): Optional[int]= config_and_inputs
        SCREAMING_SNAKE_CASE__: Tuple= {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class _lowerCamelCase(UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, unittest.TestCase):
    """unittest suite wiring the Nezha models into the shared model-tester mixins.

    NOTE(review): this file appears machine-mangled — every test method is named
    ``UpperCamelCase_`` and all three class attributes are named ``__a``, so
    Python keeps only the LAST binding of each and unittest would discover only
    one test.  Names are left as-is (renaming is an interface change); only
    internal breakage inside method bodies is fixed here.
    """

    # Upstream: all_model_classes
    __a = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Upstream: pipeline_model_mapping
    __a = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __a = True

    def UpperCamelCase_(self, inputs_dict, model_class, return_labels=False):
        """Add dummy labels for pretraining-style heads (upstream: _prepare_for_class).

        BUGFIX(review): parameters were all named ``lowerCAmelCase`` while the
        body read ``return_labels``/``model_class`` (NameError); restored.
        ``MODEL_FOR_PRETRAINING_MAPPING`` and ``torch_device`` are assumed
        imported at module top as in the upstream file — confirm.
        """
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def UpperCamelCase_(self):
        """Upstream: setUp — build the model tester and config tester."""
        self.model_tester = NezhaModelTester(self)
        # NOTE(review): config_class reconstructed as NezhaConfig (mangled in source) — confirm.
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def UpperCamelCase_(self):
        self.config_tester.run_common_tests()

    def UpperCamelCase_(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def UpperCamelCase_(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def UpperCamelCase_(self):
        # This regression test was failing with PyTorch < 1.3
        # BUGFIX(review): the original unpacked all nine values into one mangled
        # name and then passed an undefined variable nine times (NameError).
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        encoder_hidden_states = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def UpperCamelCase_(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def UpperCamelCase_(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def UpperCamelCase_(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def UpperCamelCase_(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def UpperCamelCase_(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def UpperCamelCase_(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def UpperCamelCase_(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def UpperCamelCase_(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def UpperCamelCase_(self):
        """Trace each model on CPU, save/load it, and run it on GPU."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                # BUGFIX: was `return`, which silently skipped every model class
                # after NezhaForMultipleChoice instead of just this one.
                continue
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class _lowerCamelCase(unittest.TestCase):
    """Slow integration tests: run pretrained Nezha checkpoints and compare a
    3x3 logits slice against hard-coded expected values.

    NOTE(review): both methods share the mangled name ``UpperCamelCase_`` —
    Python keeps only the second, so unittest would discover only one test.
    """

    @slow
    def UpperCamelCase_(self) -> Dict:
        # Base encoder: feed a 6-token dummy sequence and check last_hidden_state.
        SCREAMING_SNAKE_CASE__: Optional[int]= NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
        SCREAMING_SNAKE_CASE__: Optional[Any]= torch.tensor([[0, 1, 2, 3, 4, 5]] )
        SCREAMING_SNAKE_CASE__: List[Any]= torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__: int= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
        # (batch=1, seq=6, hidden=768)
        SCREAMING_SNAKE_CASE__: List[str]= torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[Any]= torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1e-4 ) )

    @slow
    def UpperCamelCase_(self) -> Optional[int]:
        # Masked-LM head: same dummy input, vocab size 21128.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
        SCREAMING_SNAKE_CASE__: int= torch.tensor([[0, 1, 2, 3, 4, 5]] )
        SCREAMING_SNAKE_CASE__: Optional[int]= torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__: List[str]= model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
        SCREAMING_SNAKE_CASE__: List[Any]= torch.Size((1, 6, 21128) )
        self.assertEqual(output.shape , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase , atol=1e-4 ) )
| 64 | from __future__ import annotations
from collections import deque
class _lowerCamelCase :
def __init__( self , lowerCAmelCase ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: list[dict]= []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(lowerCAmelCase )
self.set_fail_transitions()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: str= 0
for character in keyword:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= next_state
self.adlist[current_state]["output"].append(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> None:
SCREAMING_SNAKE_CASE__: deque= deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= 0
while q:
SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
while (
self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
and state != 0
):
SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
lowerCAmelCase , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
SCREAMING_SNAKE_CASE__: str= (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
SCREAMING_SNAKE_CASE__: dict= {} # returns a dict with keywords and list of its occurrences
SCREAMING_SNAKE_CASE__: Optional[Any]= 0
for i in range(len(lowerCAmelCase ) ):
while (
self.find_next_state(lowerCAmelCase , string[i] ) is None
and current_state != 0
):
SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] )
if next_state is None:
SCREAMING_SNAKE_CASE__: List[Any]= 0
else:
SCREAMING_SNAKE_CASE__: Dict= next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
SCREAMING_SNAKE_CASE__: Optional[Any]= []
result[key].append(i - len(lowerCAmelCase ) + 1 )
return result
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 1 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# BUGFIX(review): bound as `lowercase_` but read as `logger` in the class below
# (e.g. `logger.warning(...)`) — restore the name the code actually uses.
logger = logging.get_logger(__name__)
class _lowerCamelCase ( UpperCamelCase_ ):
    """SpeechT5-style feature extractor: raw waveform → normalized input values
    and/or log-mel spectrogram targets.

    NOTE(review): this class appears machine-mangled.  Several ``def`` lines
    declare many parameters with the identical name ``lowerCAmelCase`` (a
    SyntaxError in Python), bodies read names the signatures never bind, and
    ``np.intaa`` / ``np.floataa`` look like corrupted ``np.int32`` /
    ``np.float32``.  Code is left byte-identical here; these must be repaired
    against the upstream SpeechT5FeatureExtractor before this file can run.
    """

    # Keys produced by __call__ (model input names).
    __a = ["input_values", "attention_mask"]

    # NOTE(review): duplicate parameter names below — see class note.
    def __init__( self , lowerCAmelCase = 1 , lowerCAmelCase = 16000 , lowerCAmelCase = 0.0 , lowerCAmelCase = False , lowerCAmelCase = 80 , lowerCAmelCase = 16 , lowerCAmelCase = 64 , lowerCAmelCase = "hann_window" , lowerCAmelCase = 1.0 , lowerCAmelCase = 80 , lowerCAmelCase = 7600 , lowerCAmelCase = 1e-10 , lowerCAmelCase = 2 , lowerCAmelCase = True , **lowerCAmelCase , ) -> str:
        super().__init__(feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Tuple= do_normalize
        SCREAMING_SNAKE_CASE__: Optional[Any]= return_attention_mask
        SCREAMING_SNAKE_CASE__: Optional[int]= num_mel_bins
        SCREAMING_SNAKE_CASE__: Union[str, Any]= hop_length
        SCREAMING_SNAKE_CASE__: Optional[int]= win_length
        SCREAMING_SNAKE_CASE__: Dict= win_function
        SCREAMING_SNAKE_CASE__: str= frame_signal_scale
        SCREAMING_SNAKE_CASE__: Optional[int]= fmin
        SCREAMING_SNAKE_CASE__: Any= fmax
        SCREAMING_SNAKE_CASE__: Union[str, Any]= mel_floor
        SCREAMING_SNAKE_CASE__: Tuple= reduction_factor
        # Derived quantities: window/hop in samples, FFT size, frequency bins.
        SCREAMING_SNAKE_CASE__: Dict= win_length * sampling_rate // 1000
        SCREAMING_SNAKE_CASE__: int= hop_length * sampling_rate // 1000
        SCREAMING_SNAKE_CASE__: List[str]= optimal_fft_length(self.sample_size )
        SCREAMING_SNAKE_CASE__: List[Any]= (self.n_fft // 2) + 1
        SCREAMING_SNAKE_CASE__: List[str]= window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
        # Deprecation warnings for legacy constructor arguments.
        if frame_signal_scale != 1.0:
            warnings.warn(
                '''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , lowerCAmelCase , )
        if reduction_factor != 2.0:
            warnings.warn(
                '''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , lowerCAmelCase , )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def UpperCamelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0.0 ) -> List[np.ndarray]:
        """Zero-mean/unit-variance normalize each vector over its unpadded length."""
        if attention_mask is not None:
            SCREAMING_SNAKE_CASE__: Any= np.array(lowerCAmelCase , np.intaa )
            SCREAMING_SNAKE_CASE__: Optional[Any]= []
            for vector, length in zip(lowerCAmelCase , attention_mask.sum(-1 ) ):
                # Normalize using statistics of the non-padded prefix only.
                SCREAMING_SNAKE_CASE__: str= (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    SCREAMING_SNAKE_CASE__: Optional[int]= padding_value
                normed_input_values.append(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: List[str]= [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values

    def UpperCamelCase_ ( self , lowerCAmelCase , ) -> np.ndarray:
        """Compute a log10-mel spectrogram, returned as (frames, num_mel_bins)."""
        SCREAMING_SNAKE_CASE__: Tuple= spectrogram(
            lowerCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
        return log_mel_spec.T

    # NOTE(review): duplicate parameter names below — see class note.
    def __call__( self , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> BatchFeature:
        """Process `audio` (model inputs) and/or `audio_target` (mel targets)."""
        if audio is None and audio_target is None:
            raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        if audio is not None:
            SCREAMING_SNAKE_CASE__: str= self._process_audio(
                lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase , )
        else:
            SCREAMING_SNAKE_CASE__: int= None
        if audio_target is not None:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= self._process_audio(
                lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase , )
        if inputs is None:
            return inputs_target
        else:
            # Merge target values into the inputs as decoder labels/mask.
            SCREAMING_SNAKE_CASE__: Tuple= inputs_target['''input_values''']
            SCREAMING_SNAKE_CASE__: List[str]= inputs_target.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                SCREAMING_SNAKE_CASE__: Dict= decoder_attention_mask
            return inputs

    # NOTE(review): duplicate parameter names below — see class note.
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> BatchFeature:
        """Shared waveform/spectrogram processing used for both inputs and targets."""
        SCREAMING_SNAKE_CASE__: Tuple= isinstance(lowerCAmelCase , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        SCREAMING_SNAKE_CASE__: int= is_batched_numpy or (
            isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            SCREAMING_SNAKE_CASE__: Optional[int]= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for speech in speech]
        elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
            SCREAMING_SNAKE_CASE__: Dict= np.asarray(lowerCAmelCase , dtype=np.floataa )
        elif isinstance(lowerCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
            SCREAMING_SNAKE_CASE__: int= speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            SCREAMING_SNAKE_CASE__: List[str]= [speech]
        # needed to make pad() work on spectrogram inputs
        SCREAMING_SNAKE_CASE__: List[str]= self.feature_size
        # convert into correct format for padding
        if is_target:
            SCREAMING_SNAKE_CASE__: List[str]= [self._extract_mel_features(lowerCAmelCase ) for waveform in speech]
            SCREAMING_SNAKE_CASE__: int= BatchFeature({'''input_values''': features} )
            SCREAMING_SNAKE_CASE__: str= self.num_mel_bins
        else:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= BatchFeature({'''input_values''': speech} )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pad(
            lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , truncation=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , )
        SCREAMING_SNAKE_CASE__: str= feature_size_hack
        # convert input values to correct format
        SCREAMING_SNAKE_CASE__: Union[str, Any]= padded_inputs['''input_values''']
        if not isinstance(input_values[0] , np.ndarray ):
            SCREAMING_SNAKE_CASE__: Dict= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for array in input_values]
        elif (
            not isinstance(lowerCAmelCase , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.floataa )
        ):
            SCREAMING_SNAKE_CASE__: List[str]= [array.astype(np.floataa ) for array in input_values]
        elif isinstance(lowerCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
            SCREAMING_SNAKE_CASE__: Dict= input_values.astype(np.floataa )
        # convert attention_mask to correct format
        SCREAMING_SNAKE_CASE__: Union[str, Any]= padded_inputs.get('''attention_mask''' )
        if attention_mask is not None:
            SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase , dtype=np.intaa ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            SCREAMING_SNAKE_CASE__: List[str]= (
                attention_mask
                if self._get_padding_strategies(lowerCAmelCase , max_length=lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            SCREAMING_SNAKE_CASE__: Any= self.zero_mean_unit_var_norm(
                padded_inputs['''input_values'''] , attention_mask=lowerCAmelCase , padding_value=self.padding_value )
        if return_tensors is not None:
            SCREAMING_SNAKE_CASE__: List[str]= padded_inputs.convert_to_tensors(lowerCAmelCase )
        return padded_inputs

    def UpperCamelCase_ ( self ) -> Dict[str, Any]:
        """Serialize config, dropping attributes derived from the primary ones."""
        SCREAMING_SNAKE_CASE__: Tuple= super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        SCREAMING_SNAKE_CASE__: List[str]= ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
        for name in names:
            if name in output:
                del output[name]
        return output
| 64 | import numpy as np
def A__(f, ya, xa, x_end, h):
    """Classic 4th-order Runge–Kutta integrator for y' = f(x, y).

    BUGFIX(review): the original declared all five parameters with the same
    name ``snake_case_`` (a SyntaxError) while the body read ``f``, ``ya``
    (y0), ``xa`` (x0), ``x_end`` and ``h``; those names are restored.

    Args:
        f: right-hand side f(x, y).
        ya: initial value y(xa).
        xa: initial x.
        x_end: final x.
        h: step size.

    Returns:
        np.ndarray of length n+1 with the solution values at each step.
    """
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # Four slope evaluations of the RK4 scheme.
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def A__(checkpoint_path):
    """Load a metaseq/fairseq OPT checkpoint and normalize its state dict.

    Drops fairseq-only weights, renames projection/layer-norm keys to the HF
    naming, and splits each fused ``qkv_proj`` weight into separate q/k/v
    projections.

    BUGFIX(review): the original loaded the checkpoint file from disk twice
    when it contained a ``"model"`` wrapper; the already-loaded dict is reused.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = sd["model"]
    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)
    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)
    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")
            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)
            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]
    return sd
@torch.no_grad()
def A__(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Convert a metaseq OPT checkpoint into a saved HF OPTModel.

    BUGFIX(review): the original declared three parameters all named
    ``snake_case_`` (a SyntaxError) and passed a mangled value as
    ``mkdir(exist_ok=...)``; names restored from how the body uses them.
    NOTE(review): ``load_checkpoint`` must refer to the loader defined above
    (currently itself mangled to ``A__``) — confirm after de-mangling.
    """
    state_dict = load_checkpoint(checkpoint_path)
    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()
    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)
    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
# CLI entry point: parse checkpoint/output/config paths and run the conversion.
# NOTE(review): `convert_opt_checkpoint` is not defined under that name in this
# mangled file (the converter above is named `A__`) — confirm after de-mangling.
if __name__ == "__main__":
    lowercase_ : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    lowercase_ : int = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 64 | import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _lowerCamelCase ( unittest.TestCase ):
    """Sanity checks for diffusers' `get_activation` factory: each name maps to
    the expected nn module, and the activation is ~0 for large negative input,
    nonzero at -1, 0 at 0, and ~identity for large positive input.

    NOTE(review): all four test methods share the mangled name
    ``UpperCamelCase_`` — Python keeps only the last definition (the gelu
    check), so unittest would discover a single test instead of four.
    """

    def UpperCamelCase_ ( self ) -> List[Any]:
        # swish -> nn.SiLU
        SCREAMING_SNAKE_CASE__: Tuple= get_activation('''swish''' )
        self.assertIsInstance(lowerCAmelCase , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def UpperCamelCase_ ( self ) -> int:
        # silu -> nn.SiLU (alias of swish)
        SCREAMING_SNAKE_CASE__: Optional[Any]= get_activation('''silu''' )
        self.assertIsInstance(lowerCAmelCase , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # mish -> nn.Mish (uses -200 since mish decays to 0 more slowly)
        SCREAMING_SNAKE_CASE__: Optional[int]= get_activation('''mish''' )
        self.assertIsInstance(lowerCAmelCase , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def UpperCamelCase_ ( self ) -> int:
        # gelu -> nn.GELU
        SCREAMING_SNAKE_CASE__: Dict= get_activation('''gelu''' )
        self.assertIsInstance(lowerCAmelCase , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 64 | 1 |
def A__(list_data, length=0):
    """Recursive bubble sort: sort ``list_data`` in place and return it.

    Each pass bubbles the largest remaining element to position ``length - 1``;
    recursion stops early once a pass makes no swap.

    BUGFIX(review): the original declared both parameters with the same name
    ``snake_case_`` (a SyntaxError) while the body read ``list_data`` and
    ``length``; those names are restored.
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else A__(list_data, length - 1)
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ : Tuple = TypeVar('T')
class _lowerCamelCase ( Generic[T] ):
def __init__( self , lowerCAmelCase , lowerCAmelCase ) -> None:
SCREAMING_SNAKE_CASE__: Any | T= None
SCREAMING_SNAKE_CASE__: int= len(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: list[T]= [any_type for _ in range(self.N )] + arr
SCREAMING_SNAKE_CASE__: List[Any]= fnc
self.build()
def UpperCamelCase_ ( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> None:
p += self.N
SCREAMING_SNAKE_CASE__: Union[str, Any]= v
while p > 1:
SCREAMING_SNAKE_CASE__: Any= p // 2
SCREAMING_SNAKE_CASE__: Optional[Any]= self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> T | None: # noqa: E741
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= l + self.N, r + self.N
SCREAMING_SNAKE_CASE__: T | None= None
while l <= r:
if l % 2 == 1:
SCREAMING_SNAKE_CASE__: str= self.st[l] if res is None else self.fn(lowerCAmelCase , self.st[l] )
if r % 2 == 0:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.st[r] if res is None else self.fn(lowerCAmelCase , self.st[r] )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
    from functools import reduce

    # BUGFIX(review): this self-test referenced `SegmentTree` (the class is
    # named `_lowerCamelCase` in this file), called an undefined
    # `test_all_segments`, and wrote updates to a mangled module variable
    # instead of `test_array[index]`; all three are repaired.
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = _lowerCamelCase(test_array, min)
    max_segment_tree = _lowerCamelCase(test_array, max)
    sum_segment_tree = _lowerCamelCase(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Brute-force every (i, j) range and compare against the three trees."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    # Apply point updates and re-verify every range.
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 64 | 1 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def A__():
    """Session-scoped toy dataset: 10 identical rows with tokens/labels/answers/id.

    BUGFIX(review): the row count was assigned to a mangled name while the
    ``id`` column read an undefined ``snake_case_`` (NameError); it is now a
    single local ``n``.  NOTE(review): every fixture in this file is named
    ``A__`` — only the last survives; upstream names must be restored for
    pytest to resolve them.
    """
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session")
def A__(dataset, tmp_path_factory):
    """Materialize the toy dataset into an .arrow cache file and return its path.

    BUGFIX(review): both parameters were named ``snake_case_`` (a SyntaxError);
    pytest injects fixtures by parameter name, so they must be ``dataset`` and
    ``tmp_path_factory``.
    """
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
# BUGFIX(review): this constant was bound to the mangled name `lowercase_`
# while every fixture below reads `FILE_CONTENT` (NameError).
FILE_CONTENT = '\\n    Text data.\n    Second line of data.'
@pytest.fixture(scope="session")
def A__(tmp_path_factory):
    """Write FILE_CONTENT to a plain .txt file and return its path.

    BUGFIX(review): the parameter was named ``snake_case_`` while the body read
    ``tmp_path_factory``; pytest injects fixtures by parameter name.
    """
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def A__(tmp_path_factory):
    """Write FILE_CONTENT bz2-compressed and return the path.

    BUGFIX(review): ``import bza`` restored to the stdlib ``bz2``; parameter
    renamed so pytest can inject ``tmp_path_factory``; the payload is
    ``FILE_CONTENT``, not the (previously unbound) parameter.
    """
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def A__(tmp_path_factory):
    """Write FILE_CONTENT gzip-compressed and return the path (as str).

    BUGFIX(review): parameter renamed so pytest can inject
    ``tmp_path_factory``; the payload is ``FILE_CONTENT``.
    """
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def A__(tmp_path_factory):
    """Write FILE_CONTENT lz4-compressed and return the path.

    BUGFIX(review): ``import lza.frame`` restored to ``lz4.frame``; parameter
    renamed for pytest injection.  NOTE(review): as upstream, ``path`` is
    unbound when LZ4 is unavailable — callers rely on the availability guard.
    """
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
    return path
@pytest.fixture(scope="session")
def A__(tmp_path_factory, text_file):
    """Pack the text_file fixture into a .7z archive and return the path.

    BUGFIX(review): both parameters were named ``snake_case_`` (a SyntaxError);
    ``import pyazr`` restored to ``py7zr``.  NOTE(review): as upstream,
    ``path`` is unbound when py7zr is unavailable.
    """
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def A__(tmp_path_factory, text_file):
    """Pack the text_file fixture into a .tar archive and return the path.

    BUGFIX(review): both parameters were named ``snake_case_`` (a SyntaxError);
    restored to the fixture names pytest injects.
    """
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def A__(tmp_path_factory):
    """Write FILE_CONTENT xz/lzma-compressed and return the path.

    BUGFIX(review): parameter renamed so pytest can inject
    ``tmp_path_factory``; the payload is ``FILE_CONTENT``.
    """
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : List[Any] , snake_case__ : Tuple ):
    """Zip the text file *snake_case__*; return the zip archive path."""
    # fix: duplicate "snake_case_" parameters (SyntaxError) and undefined locals;
    # positionally (tmp_path_factory, text_file).
    import zipfile
    path = snake_case_.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(snake_case__, arcname=os.path.basename(snake_case__))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Dict ):
    """Write FILE_CONTENT to a zstandard-compressed file and return its path."""
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    path = snake_case_.mktemp("data") / "file.txt.zst"
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : List[Any] ):
    """Write a small TMX (translation-memory XML) file and return its path."""
    # fix: restore the undefined locals (filename, data) mangled by obfuscation;
    # snake_case_ is tmp_path_factory.
    filename = snake_case_.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
# Bug fix: the obfuscation collapsed five distinct module constants into
# repeated rebindings of "lowercase_", leaving DATA, DATA_DICT_OF_LISTS,
# DATA_312 and DATA_STR — all referenced by the fixtures in this file —
# undefined. Restore the named constants; DATA2 is the presumed original name
# of the second list (not referenced in this chunk — TODO confirm). The final
# "lowercase_" binding is preserved for backward compatibility.
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
lowercase_ = DATA_STR  # the last value "lowercase_" held in the original code
@pytest.fixture(scope='''session''' )
def A__ ( ):
    """Session-scoped fixture returning the module-level DATA_DICT_OF_LISTS constant."""
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : int ):
    """Materialize the dict-of-lists test data as a cached Arrow file; return its path."""
    # fix: the body referenced undefined names (tmp_path_factory, dataset, path).
    # snake_case_ is pytest's tmp_path_factory; the source dict is presumably
    # DATA_DICT_OF_LISTS — TODO confirm against the original conftest.
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(snake_case_.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : List[Any] ):
    """Create a SQLite DB with one ``dataset`` table filled from DATA; return its path."""
    import sqlite3  # fix: module was misspelled "sqlitea"
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    path = str(snake_case_.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : List[Any] ):
    """Write DATA to dataset.csv and return the file path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    path = str(snake_case_.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Dict ):
    """Write DATA to a second CSV file (dataset2.csv) and return the file path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    path = str(snake_case_.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Optional[Any] , snake_case__ : Optional[int] ):
    """Bz2-compress an existing CSV file (*snake_case_*); return the new path."""
    # fix: duplicate "snake_case_" parameters (SyntaxError); positionally they
    # are (csv_path, tmp_path_factory). Also restores mangled locals.
    import bz2  # fix: module was misspelled "bza"
    path = snake_case__.mktemp("data") / "dataset.csv.bz2"
    with open(snake_case_, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : List[str] , snake_case__ : Optional[Any] , snake_case___ : Optional[Any] ):
    """Zip two CSV files together; return the archive path."""
    # fix: all three parameters shared the name "snake_case_" (SyntaxError);
    # positionally they are (csv_path, csv2_path, tmp_path_factory).
    path = snake_case___.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(snake_case_, arcname=os.path.basename(snake_case_))
        f.write(snake_case__, arcname=os.path.basename(snake_case__))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : str , snake_case__ : Union[str, Any] , snake_case___ : Optional[int] ):
    """Zip two CSV files under uppercase .CSV arcnames; return the archive path."""
    # fix: duplicate parameter names (SyntaxError) plus undefined csv_path /
    # csva_path references; positionally (csv_path, csv2_path, tmp_path_factory).
    path = snake_case___.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(snake_case_, arcname=os.path.basename(snake_case_.replace(".csv", ".CSV")))
        f.write(snake_case__, arcname=os.path.basename(snake_case__.replace(".csv", ".CSV")))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Tuple , snake_case__ : Tuple , snake_case___ : Union[str, Any] ):
    """Zip two CSV files under a "main_dir" folder; return the archive path."""
    # fix: duplicate parameter names (SyntaxError);
    # positionally (csv_path, csv2_path, tmp_path_factory).
    path = snake_case___.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(snake_case_, arcname=os.path.join("main_dir", os.path.basename(snake_case_)))
        f.write(snake_case__, arcname=os.path.join("main_dir", os.path.basename(snake_case__)))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Tuple ):
    """Write DATA to a Parquet file and return its path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    path = str(snake_case_.mktemp("data") / "dataset.parquet")
    # fix: pa.intaa / pa.floataa do not exist in pyarrow — presumably
    # pa.int64 / pa.float64 (matches the int/float test values) — TODO confirm
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : int ):
    """Dump {"data": DATA} as a JSON file; return the file path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    path = str(snake_case_.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : List[Any] ):
    """Dump {"data": DATA_DICT_OF_LISTS} as a JSON file; return the file path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    path = str(snake_case_.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Tuple ):
    """Write DATA as JSON Lines (one object per line); return the file path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    path = str(snake_case_.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : int ):
    """Write DATA as a second JSON Lines file (dataset2.jsonl); return the file path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    path = str(snake_case_.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Dict ):
    """Write DATA_312 as JSON Lines; return the file path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    path = str(snake_case_.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Union[str, Any] ):
    """Write DATA_STR as JSON Lines; return the file path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    path = str(snake_case_.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Tuple , snake_case__ : List[str] ):
    """Gzip an existing text file (*snake_case__*); return the new path."""
    # fix: duplicate "snake_case_" parameters (SyntaxError);
    # positionally (tmp_path_factory, text_path).
    import gzip
    path = str(snake_case_.mktemp("data") / "dataset.txt.gz")
    with open(snake_case__, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : List[Any] , snake_case__ : Tuple ):
    """Gzip an existing jsonl file (*snake_case__*); return the new path."""
    # fix: duplicate "snake_case_" parameters (SyntaxError);
    # positionally (tmp_path_factory, jsonl_path).
    import gzip
    path = str(snake_case_.mktemp("data") / "dataset.jsonl.gz")
    with open(snake_case__, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Optional[int] , snake_case__ : Optional[int] , snake_case___ : Optional[Any] ):
    """Zip two jsonl files together; return the archive path."""
    # fix: duplicate parameter names (SyntaxError);
    # positionally (jsonl_path, jsonl2_path, tmp_path_factory).
    path = snake_case___.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(snake_case_, arcname=os.path.basename(snake_case_))
        f.write(snake_case__, arcname=os.path.basename(snake_case__))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Dict , snake_case__ : str , snake_case___ : int , snake_case____ : List[Any] ):
    """Zip an existing archive under a "nested" folder; return the new archive path."""
    # fix: duplicate parameter names (SyntaxError); positionally presumably
    # (zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory) — TODO confirm.
    # Only the first file is written, under the "nested" directory.
    path = snake_case____.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(snake_case_, arcname=os.path.join("nested", os.path.basename(snake_case_)))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Any , snake_case__ : str , snake_case___ : Optional[Any] ):
    """Zip two jsonl files under a "main_dir" folder; return the archive path."""
    # fix: duplicate parameter names (SyntaxError);
    # positionally (jsonl_path, jsonl2_path, tmp_path_factory).
    path = snake_case___.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(snake_case_, arcname=os.path.join("main_dir", os.path.basename(snake_case_)))
        f.write(snake_case__, arcname=os.path.join("main_dir", os.path.basename(snake_case__)))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Dict , snake_case__ : int , snake_case___ : str ):
    """Tar two jsonl files together; return the archive path."""
    # fix: duplicate parameter names (SyntaxError);
    # positionally (jsonl_path, jsonl2_path, tmp_path_factory).
    path = snake_case___.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(snake_case_, arcname=os.path.basename(snake_case_))
        f.add(snake_case__, arcname=os.path.basename(snake_case__))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : List[str] , snake_case__ : List[str] , snake_case___ : str , snake_case____ : List[Any] ):
    """Tar an existing archive under a "nested" folder; return the new archive path."""
    # fix: duplicate parameter names (SyntaxError); positionally presumably
    # (tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory) — TODO confirm.
    path = snake_case____.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(snake_case_, arcname=os.path.join("nested", os.path.basename(snake_case_)))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Optional[Any] ):
    """Write the lines "0".."3" to dataset.txt; return the file path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    data = ["0", "1", "2", "3"]
    path = str(snake_case_.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : List[Any] ):
    """Write the lines "0".."3" to dataset2.txt; return the file path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    data = ["0", "1", "2", "3"]
    path = str(snake_case_.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Optional[int] ):
    """Write the lines "0".."3" to a file with an unsupported .abc extension; return its path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory.
    # Note: unlike the .txt fixtures above, the original returned a Path (no str()).
    data = ["0", "1", "2", "3"]
    path = snake_case_.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Optional[Any] , snake_case__ : int , snake_case___ : List[Any] ):
    """Zip two text files together; return the archive path."""
    # fix: duplicate parameter names (SyntaxError);
    # positionally (text_path, text2_path, tmp_path_factory).
    path = snake_case___.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(snake_case_, arcname=os.path.basename(snake_case_))
        f.write(snake_case__, arcname=os.path.basename(snake_case__))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Optional[Any] , snake_case__ : Tuple , snake_case___ : Union[str, Any] ):
    """Zip two text files under a "main_dir" folder; return the archive path."""
    # fix: duplicate parameter names (SyntaxError);
    # positionally (text_path, text2_path, tmp_path_factory).
    path = snake_case___.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(snake_case_, arcname=os.path.join("main_dir", os.path.basename(snake_case_)))
        f.write(snake_case__, arcname=os.path.join("main_dir", os.path.basename(snake_case__)))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : str , snake_case__ : Tuple , snake_case___ : str ):
    """Zip two files under unsupported .ext arcnames; return the archive path."""
    # fix: duplicate parameter names (SyntaxError); positionally two source
    # file paths plus tmp_path_factory — presumably (text_path, text2_path,
    # tmp_path_factory), TODO confirm.
    path = snake_case___.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(snake_case_, arcname=os.path.basename("unsupported.ext"))
        f.write(snake_case__, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : List[str] ):
    """Write text containing a U+2029 (paragraph separator) line break; return the path."""
    # fix: restore locals mangled by obfuscation; snake_case_ is tmp_path_factory
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(snake_case_.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope='''session''' )
def A__ ( ):
    """Return the repo-relative path of the bundled RGB test image."""
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")
@pytest.fixture(scope='''session''' )
def A__ ( ):
    """Return the repo-relative path of the bundled 44.1 kHz WAV test file."""
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : str , snake_case__ : Optional[Any] ):
    """Zip the test image twice (second copy renamed *2.jpg); return the archive path."""
    # fix: duplicate parameter names (SyntaxError);
    # positionally (image_file, tmp_path_factory).
    path = snake_case__.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(snake_case_, arcname=os.path.basename(snake_case_))
        f.write(snake_case_, arcname=os.path.basename(snake_case_).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope='''session''' )
def A__ ( snake_case_ : Union[str, Any] ):
    """Build a data dir with visible and hidden files/subdirs; return its root path."""
    # fix: the body read an undefined "data_dir" — bind it from the
    # tmp_path_factory parameter (snake_case_).
    data_dir = snake_case_.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
| 64 | # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
# Make torch/cuDNN ops deterministic so the image comparisons below are reproducible.
enable_full_determinism()
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Fast tests for StableDiffusionControlNetImg2ImgPipeline with one ControlNet.

    NOTE(review): obfuscation left locals assigned to SCREAMING_SNAKE_CASE__
    while later lines read the original names (unet, controlnet, scheduler,
    vae, text_encoder, tokenizer, generator, image, control_image,
    controlnet_embedder_scale_factor, lowerCAmelCase used as a value) — those
    names are undefined as written; confirm against the upstream diffusers
    test before running. The repeated ``__a`` class attributes were distinct
    mixin-config names in the original.
    """
    __a = StableDiffusionControlNetImgaImgPipeline
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
    __a = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def UpperCamelCase_ ( self ) -> str:
        """Build the tiny-model components dict consumed by the pipeline mixins."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: str= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: str= DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[str]= AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[Any]= CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTextModel(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> Optional[Any]:
        """Build deterministic dummy inputs (prompt, image, control image, generator)."""
        # MPS has no per-device Generator, hence the CPU manual_seed branch.
        if str(lowerCAmelCase ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.manual_seed(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= 2
        SCREAMING_SNAKE_CASE__: Tuple= randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , )
        SCREAMING_SNAKE_CASE__: int= floats_tensor(control_image.shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__: str= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        SCREAMING_SNAKE_CASE__: Tuple= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def UpperCamelCase_ ( self ) -> Tuple:
        """Run the shared attention-slicing equivalence test with a loose tolerance."""
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase_ ( self ) -> Dict:
        """Run the shared xFormers-attention equivalence test (CUDA + xformers only)."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def UpperCamelCase_ ( self ) -> str:
        """Check batched inference matches single-sample inference."""
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Fast tests for StableDiffusionControlNetImg2ImgPipeline with a MultiControlNet.

    NOTE(review): as in the single-ControlNet class above, obfuscation left
    locals assigned to SCREAMING_SNAKE_CASE__ while later lines read original
    names (controlneta, unet, m, output_a, ...) — undefined as written;
    confirm against the upstream diffusers test before running.
    """
    __a = StableDiffusionControlNetImgaImgPipeline
    __a = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    __a = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    __a = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def UpperCamelCase_ ( self ) -> Dict:
        """Build the tiny-model components dict with two ControlNets wrapped in MultiControlNetModel."""
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: int= UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        # Re-initializes the controlnet down-block weights so the two nets differ.
        def init_weights(lowerCAmelCase ):
            if isinstance(lowerCAmelCase , torch.nn.Convad ):
                torch.nn.init.normal(m.weight )
                m.bias.data.fill_(1.0 )
        SCREAMING_SNAKE_CASE__: Any= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(lowerCAmelCase )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Tuple= AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__: Optional[int]= CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        SCREAMING_SNAKE_CASE__: Any= CLIPTextModel(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        SCREAMING_SNAKE_CASE__: Dict= MultiControlNetModel([controlneta, controlneta] )
        SCREAMING_SNAKE_CASE__: int= {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=0 ) -> List[Any]:
        """Build deterministic dummy inputs; control_image is a list (one per ControlNet)."""
        if str(lowerCAmelCase ).startswith('''mps''' ):
            SCREAMING_SNAKE_CASE__: str= torch.manual_seed(lowerCAmelCase )
        else:
            SCREAMING_SNAKE_CASE__: Optional[int]= torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= 2
        SCREAMING_SNAKE_CASE__: Tuple= [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=lowerCAmelCase , device=torch.device(lowerCAmelCase ) , ),
        ]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= floats_tensor(control_image[0].shape , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
        SCREAMING_SNAKE_CASE__: int= {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs
    def UpperCamelCase_ ( self ) -> List[Any]:
        """Check that different control_guidance_start/end settings produce different images."""
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: str= self.pipeline_class(**lowerCAmelCase )
        pipe.to(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= 10.0
        SCREAMING_SNAKE_CASE__: Any= 4
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: List[Any]= pipe(**lowerCAmelCase )[0]
        SCREAMING_SNAKE_CASE__: Tuple= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Dict= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: int= pipe(**lowerCAmelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        SCREAMING_SNAKE_CASE__: Dict= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= steps
        SCREAMING_SNAKE_CASE__: List[Any]= scale
        SCREAMING_SNAKE_CASE__: str= pipe(**lowerCAmelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        SCREAMING_SNAKE_CASE__: Optional[int]= self.get_dummy_inputs(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: int= steps
        SCREAMING_SNAKE_CASE__: int= scale
        SCREAMING_SNAKE_CASE__: Any= pipe(**lowerCAmelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
        assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
    def UpperCamelCase_ ( self ) -> int:
        """Run the shared attention-slicing equivalence test with a loose tolerance."""
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase_ ( self ) -> Dict:
        """Run the shared xFormers-attention equivalence test (CUDA + xformers only)."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        """Check batched inference matches single-sample inference."""
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        """save_pretrained is unsupported for MultiControlNet; only NotImplementedError is tolerated."""
        SCREAMING_SNAKE_CASE__: Any= self.get_dummy_components()
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pipeline_class(**lowerCAmelCase )
        pipe.to(lowerCAmelCase )
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(lowerCAmelCase )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    """Slow GPU integration test: canny-conditioned img2img against a reference image."""
    def UpperCamelCase_ ( self ) -> Dict:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCamelCase_ ( self ) -> Tuple:
        """Run the full SD1.5 + sd-controlnet-canny pipeline and compare to a stored output.

        NOTE(review): locals are obfuscation-mangled here too (pipe, generator,
        prompt, image, expected_image read names never bound) — confirm against
        the upstream diffusers test before running.
        """
        SCREAMING_SNAKE_CASE__: Optional[int]= ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
        SCREAMING_SNAKE_CASE__: Tuple= StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase , controlnet=lowerCAmelCase )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Tuple= torch.Generator(device='''cpu''' ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__: List[Any]= '''evil space-punk bird'''
        SCREAMING_SNAKE_CASE__: List[str]= load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
        SCREAMING_SNAKE_CASE__: List[Any]= load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
        SCREAMING_SNAKE_CASE__: Optional[Any]= pipe(
            lowerCAmelCase , lowerCAmelCase , control_image=lowerCAmelCase , generator=lowerCAmelCase , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
        SCREAMING_SNAKE_CASE__: Union[str, Any]= output.images[0]
        assert image.shape == (512, 512, 3)
        SCREAMING_SNAKE_CASE__: str= load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
        assert np.abs(expected_image - image ).max() < 9e-2
| 64 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__a = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = 50257 , lowerCAmelCase = 1024 , lowerCAmelCase = 768 , lowerCAmelCase = 12 , lowerCAmelCase = 12 , lowerCAmelCase = None , lowerCAmelCase = "gelu_new" , lowerCAmelCase = 0.1 , lowerCAmelCase = 0.1 , lowerCAmelCase = 0.1 , lowerCAmelCase = 1e-5 , lowerCAmelCase = 0.02 , lowerCAmelCase = True , lowerCAmelCase = True , lowerCAmelCase = False , lowerCAmelCase = False , ) -> Optional[int]:
super().__init__()
SCREAMING_SNAKE_CASE__: List[str]= prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
f' `n_embd`: {n_embd} are not equal.' )
SCREAMING_SNAKE_CASE__: List[str]= prefix_inner_dim
SCREAMING_SNAKE_CASE__: Optional[int]= prefix_hidden_dim
SCREAMING_SNAKE_CASE__: Dict= (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
SCREAMING_SNAKE_CASE__: Dict= (
nn.Linear(self.prefix_hidden_dim , lowerCAmelCase ) if self.prefix_hidden_dim is not None else nn.Identity()
)
SCREAMING_SNAKE_CASE__: Optional[Any]= GPTaConfig(
vocab_size=lowerCAmelCase , n_positions=lowerCAmelCase , n_embd=lowerCAmelCase , n_layer=lowerCAmelCase , n_head=lowerCAmelCase , n_inner=lowerCAmelCase , activation_function=lowerCAmelCase , resid_pdrop=lowerCAmelCase , embd_pdrop=lowerCAmelCase , attn_pdrop=lowerCAmelCase , layer_norm_epsilon=lowerCAmelCase , initializer_range=lowerCAmelCase , scale_attn_weights=lowerCAmelCase , use_cache=lowerCAmelCase , scale_attn_by_inverse_layer_idx=lowerCAmelCase , reorder_and_upcast_attn=lowerCAmelCase , )
SCREAMING_SNAKE_CASE__: List[Any]= GPTaLMHeadModel(lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.transformer.transformer.wte(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= self.encode_prefix(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[Any]= self.decode_prefix(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
SCREAMING_SNAKE_CASE__: Optional[int]= self.get_dummy_token(input_ids.shape[0] , input_ids.device )
SCREAMING_SNAKE_CASE__: Tuple= torch.cat((dummy_token, input_ids) , dim=1 )
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.transformer(inputs_embeds=lowerCAmelCase , labels=lowerCAmelCase , attention_mask=lowerCAmelCase )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> torch.Tensor:
return torch.zeros(lowerCAmelCase , self.prefix_length , dtype=torch.intaa , device=lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
return self.encode_prefix(lowerCAmelCase )
@torch.no_grad()
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Union[str, Any]:
    """Generate token sequences for a batch of features, one example at a time.

    NOTE(review): assignment targets were mangled to ``SCREAMING_SNAKE_CASE__``,
    so the names used later (``features``, ``generated_tokens``,
    ``generated_seq_lengths``, ``generated_embeddings``, ``output_tokens``,
    ``seq_lengths``) are unbound as written — restore before executing.
    """
    SCREAMING_SNAKE_CASE__: int= torch.split(lowerCAmelCase , 1 , dim=0 )  # one feature per example
    SCREAMING_SNAKE_CASE__: str= []
    SCREAMING_SNAKE_CASE__: Optional[Any]= []
    for feature in features:
        SCREAMING_SNAKE_CASE__: Optional[int]= self.decode_prefix(feature.to(lowerCAmelCase ) ) # back to the clip feature
        # Only support beam search for now
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[Any]= self.generate_beam(
            input_embeds=lowerCAmelCase , device=lowerCAmelCase , eos_token_id=lowerCAmelCase )
        generated_tokens.append(output_tokens[0] )
        generated_seq_lengths.append(seq_lengths[0] )
    SCREAMING_SNAKE_CASE__: Dict= torch.stack(lowerCAmelCase )
    SCREAMING_SNAKE_CASE__: Optional[Any]= torch.stack(lowerCAmelCase )
    return generated_tokens, generated_seq_lengths
@torch.no_grad()
def UpperCamelCase_ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase = 5 , lowerCAmelCase = 67 , lowerCAmelCase = 1.0 , lowerCAmelCase = None , ) -> Any:
    """Beam-search decoding over the GPT-2 LM head.

    NOTE(review): assignment targets were mangled to ``SCREAMING_SNAKE_CASE__``,
    so names used below (``scores``, ``tokens``, ``seq_lengths``,
    ``is_stopped``, ``generated``, ``logits``, ``outputs``, ``next_tokens``,
    ``scores_sum``, ``scores_sum_average``, ``next_tokens_source``,
    ``next_token_embed``, ``order``, ``output_texts``) are unbound as written —
    restore from the upstream source before executing.
    """
    SCREAMING_SNAKE_CASE__: Tuple= eos_token_id
    SCREAMING_SNAKE_CASE__: List[Any]= None  # per-beam cumulative scores
    SCREAMING_SNAKE_CASE__: Any= None  # per-beam token buffer
    SCREAMING_SNAKE_CASE__: str= torch.ones(lowerCAmelCase , device=lowerCAmelCase , dtype=torch.int )
    SCREAMING_SNAKE_CASE__: List[Any]= torch.zeros(lowerCAmelCase , device=lowerCAmelCase , dtype=torch.bool )
    if input_embeds is not None:
        SCREAMING_SNAKE_CASE__: str= input_embeds
    else:
        SCREAMING_SNAKE_CASE__: Dict= self.transformer.transformer.wte(lowerCAmelCase )
    for i in range(lowerCAmelCase ):
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.transformer(inputs_embeds=lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[Any]= outputs.logits
        # Temperature-scaled log-probabilities for the last position.
        SCREAMING_SNAKE_CASE__: int= logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
        SCREAMING_SNAKE_CASE__: List[Any]= logits.softmax(-1 ).log()
        if scores is None:
            # First step: expand the single example into top-k beam candidates.
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= logits.topk(lowerCAmelCase , -1 )
            SCREAMING_SNAKE_CASE__: Tuple= generated.expand(lowerCAmelCase , *generated.shape[1:] )
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= next_tokens.permute(1 , 0 ), scores.squeeze(0 )
            if tokens is None:
                SCREAMING_SNAKE_CASE__: Any= next_tokens
            else:
                SCREAMING_SNAKE_CASE__: Any= tokens.expand(lowerCAmelCase , *tokens.shape[1:] )
                SCREAMING_SNAKE_CASE__: Optional[int]= torch.cat((tokens, next_tokens) , dim=1 )
        else:
            # Later steps: length-normalised cumulative scores select the best
            # (beam, token) pairs across all beams.
            SCREAMING_SNAKE_CASE__: Dict= -float(np.inf )
            SCREAMING_SNAKE_CASE__: str= 0
            SCREAMING_SNAKE_CASE__: List[Any]= scores[:, None] + logits
            seq_lengths[~is_stopped] += 1
            SCREAMING_SNAKE_CASE__: Any= scores_sum / seq_lengths[:, None]
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= scores_sum_average.view(-1 ).topk(lowerCAmelCase , -1 )
            SCREAMING_SNAKE_CASE__: List[Any]= next_tokens // scores_sum.shape[1]  # which beam each pick came from
            SCREAMING_SNAKE_CASE__: int= seq_lengths[next_tokens_source]
            SCREAMING_SNAKE_CASE__: Union[str, Any]= next_tokens % scores_sum.shape[1]  # which vocab id was picked
            SCREAMING_SNAKE_CASE__: Any= next_tokens.unsqueeze(1 )
            SCREAMING_SNAKE_CASE__: Union[str, Any]= tokens[next_tokens_source]
            SCREAMING_SNAKE_CASE__: Optional[Any]= torch.cat((tokens, next_tokens) , dim=1 )
            SCREAMING_SNAKE_CASE__: Optional[Any]= generated[next_tokens_source]
            SCREAMING_SNAKE_CASE__: Optional[Any]= scores_sum_average * seq_lengths
            SCREAMING_SNAKE_CASE__: List[Any]= is_stopped[next_tokens_source]
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
        SCREAMING_SNAKE_CASE__: Any= torch.cat((generated, next_token_embed) , dim=1 )
        SCREAMING_SNAKE_CASE__: List[str]= is_stopped + next_tokens.eq(lowerCAmelCase ).squeeze()
        if is_stopped.all():
            break
    SCREAMING_SNAKE_CASE__: Tuple= scores / seq_lengths
    SCREAMING_SNAKE_CASE__: Tuple= scores.argsort(descending=lowerCAmelCase )
    # tokens tensors are already padded to max_seq_length
    SCREAMING_SNAKE_CASE__: int= [tokens[i] for i in order]
    SCREAMING_SNAKE_CASE__: Dict= torch.stack(lowerCAmelCase , dim=0 )
    SCREAMING_SNAKE_CASE__: List[str]= torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
    return output_texts, seq_lengths
| 64 | # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowerCamelCase :
    # Functional scheduler state container.
    # NOTE(review): the field names were mangled — all four fields are now
    # ``__a`` so only the last binding survives; upstream they are ``common``,
    # ``init_noise_sigma``, ``timesteps`` and ``num_inference_steps`` (the
    # keyword arguments of the classmethod below confirm the first three).
    __a = 42
    # setable values
    __a = 42
    __a = 42
    __a = None

    @classmethod
    def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
        # Convenience constructor mirroring the field order.
        return cls(common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase )
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
    # Output wrapper returned by the scheduler's step() method.
    # NOTE(review): the single field was mangled to ``__a``; judging from the
    # ``FlaxDDPMSchedulerOutput(prev_sample=..., state=...)`` call below it
    # carried the scheduler state in addition to the base-class fields.
    __a = 42
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ):
    """Flax DDPM scheduler (denoising diffusion, functional state-passing API).

    NOTE(review): this block was mangled by an automated rewrite — assignment
    targets became ``SCREAMING_SNAKE_CASE__`` and many names referenced below
    (``common``, ``dtype``, ``num_inference_steps``, ``state``, ``t``,
    ``sample``, ``variance``, ``predicted_variance``, ``key``, ``model_output``,
    ``timesteps`` ...) are unbound as written; compare with the upstream
    ``FlaxDDPMScheduler`` before executing.
    """

    # Names of the Karras diffusion schedulers this scheduler is compatible with.
    __a = [e.name for e in FlaxKarrasDiffusionSchedulers]
    __a = 42

    @property
    def UpperCamelCase_ ( self ) -> List[Any]:
        # Always True: this scheduler carries explicit (functional) state.
        return True

    @register_to_config
    def __init__( self , lowerCAmelCase = 1000 , lowerCAmelCase = 0.0001 , lowerCAmelCase = 0.02 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "fixed_small" , lowerCAmelCase = True , lowerCAmelCase = "epsilon" , lowerCAmelCase = jnp.floataa , ) -> Optional[int]:
        # Hyper-parameters are captured by @register_to_config; only the dtype
        # is kept on the instance. NOTE(review): `dtype` is unbound here — the
        # parameter names were all mangled to `lowerCAmelCase`.
        SCREAMING_SNAKE_CASE__: Optional[int]= dtype

    def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> DDPMSchedulerState:
        """Build the initial scheduler state (common tables, sigma_0, timesteps)."""
        if common is None:
            SCREAMING_SNAKE_CASE__: Optional[Any]= CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        SCREAMING_SNAKE_CASE__: Dict= jnp.array(1.0 , dtype=self.dtype )
        # Full training schedule in descending order.
        SCREAMING_SNAKE_CASE__: int= jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase , )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None ) -> jnp.ndarray:
        # DDPM does not rescale model inputs; the sample is returned unchanged.
        return sample

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = () ) -> DDPMSchedulerState:
        """Select evenly spaced inference timesteps (descending) into the state."""
        SCREAMING_SNAKE_CASE__: str= self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        SCREAMING_SNAKE_CASE__: str= (jnp.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase , )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[str]:
        """Posterior variance for timestep t, per the configured variance_type."""
        SCREAMING_SNAKE_CASE__: Tuple= state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE__: int= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        SCREAMING_SNAKE_CASE__: int= (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            SCREAMING_SNAKE_CASE__: Dict= jnp.clip(lowerCAmelCase , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            SCREAMING_SNAKE_CASE__: str= jnp.log(jnp.clip(lowerCAmelCase , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            SCREAMING_SNAKE_CASE__: Union[str, Any]= state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            SCREAMING_SNAKE_CASE__: Optional[Any]= jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate between the posterior (min) and beta (max) log-variance
            # using the model-predicted fraction in [-1, 1].
            SCREAMING_SNAKE_CASE__: List[Any]= variance
            SCREAMING_SNAKE_CASE__: Any= state.common.betas[t]
            SCREAMING_SNAKE_CASE__: List[Any]= (predicted_variance + 1) / 2
            SCREAMING_SNAKE_CASE__: Optional[Any]= frac * max_log + (1 - frac) * min_log
        return variance

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """One reverse-diffusion step: predict x_{t-1} from x_t and the model output."""
        SCREAMING_SNAKE_CASE__: Union[str, Any]= timestep
        if key is None:
            SCREAMING_SNAKE_CASE__: Optional[Any]= jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            # The model predicts both mean and (log-)variance: split channels.
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= jnp.split(lowerCAmelCase , sample.shape[1] , axis=1 )
        else:
            SCREAMING_SNAKE_CASE__: Any= None
        # 1. compute alphas, betas
        SCREAMING_SNAKE_CASE__: List[Any]= state.common.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE__: Optional[int]= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        SCREAMING_SNAKE_CASE__: Optional[int]= 1 - alpha_prod_t
        SCREAMING_SNAKE_CASE__: str= 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            SCREAMING_SNAKE_CASE__: Dict= (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            SCREAMING_SNAKE_CASE__: str= model_output
        elif self.config.prediction_type == "v_prediction":
            SCREAMING_SNAKE_CASE__: Tuple= (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ''' for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            SCREAMING_SNAKE_CASE__: Any= jnp.clip(lowerCAmelCase , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE__: int= (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        SCREAMING_SNAKE_CASE__: Any= state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE__: Dict= pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            # Draw sigma_t * eps with a fresh split of the PRNG key.
            SCREAMING_SNAKE_CASE__: int= jax.random.split(lowerCAmelCase , num=1 )
            SCREAMING_SNAKE_CASE__: str= jax.random.normal(lowerCAmelCase , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(lowerCAmelCase , lowerCAmelCase , predicted_variance=lowerCAmelCase ) ** 0.5) * noise

        # No noise is added at the final step (t == 0).
        SCREAMING_SNAKE_CASE__: Union[str, Any]= jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        SCREAMING_SNAKE_CASE__: Optional[int]= pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase , state=lowerCAmelCase )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
        # Forward-diffuse samples to a timestep; delegates to the shared helper.
        return add_noise_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
        # Velocity target (presumably for v-prediction training — confirm
        # against get_velocity_common); delegates to the shared helper.
        return get_velocity_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )

    def __len__( self ) -> Tuple:
        # Number of training timesteps configured for this scheduler.
        return self.config.num_train_timesteps
| 64 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Fix: the import-structure dict and its conditional extensions were all bound
# to throw-away names (`lowercase_`), while the `_LazyModule` call below
# referenced `_import_structure`, which was never defined — importing this
# module raised NameError. Restored the canonical transformers lazy-import
# pattern; the set of declared names is unchanged.
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def A__ ( snake_case_ : int ):
    """Return the Catalan numbers C(0)..C(n) as a list, computed bottom-up.

    Fixes: the base-case stores were lost (``catalan_list[0]`` and
    ``catalan_list[1]`` were never assigned, leaving every entry 0) and the
    inner loop ran ``range(upper_limit)`` instead of ``range(i)``, indexing
    past the valid prefix.

    >>> A__(5)
    [1, 1, 2, 5, 14, 42]
    """
    upper_limit = snake_case_
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list


# Name used by the interactive demo under ``__main__``.
catalan_numbers = A__
if __name__ == "__main__":
    # Interactive driver: repeatedly read an upper limit and print the Catalan
    # numbers up to it; a negative number (or invalid input) exits.
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            # NOTE(review): the target was mangled to `lowercase_`; the loop
            # body reads `N`, which is unbound as written — confirm upstream.
            lowercase_ : Any = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(f'''The Catalan numbers from 0 through {N} are:''')
                print(catalan_numbers(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')

    import doctest

    doctest.testmod()
| 64 | 1 |
from math import pi, sqrt, tan
# Fixes for this whole group of formulas:
#  * every function had been renamed ``A__`` so each definition shadowed the
#    previous one, while the ``__main__`` demo below calls the real names;
#  * several signatures repeated the parameter name ``snake_case_`` (a
#    SyntaxError in Python) and the bodies referenced the original parameter
#    names, which were therefore unbound;
#  * ``area_reg_polygon`` contained a duplicated, unreachable ``return``.
# The names restored here are the ones used by the error messages and demo.


def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube with edge ``side_length``."""
    if side_length < 0:
        raise ValueError('''surface_area_cube() only accepts non-negative values''' )
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a rectangular cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere: 4*pi*r^2."""
    if radius < 0:
        raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Surface area of a hemisphere (curved surface + base disc): 3*pi*r^2."""
    if radius < 0:
        raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Surface area of a cone: pi*r*(r + slant height)."""
    if radius < 0 or height < 0:
        raise ValueError('''surface_area_cone() only accepts non-negative values''' )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Surface area of a conical frustum (two parallel circular faces)."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            '''surface_area_conical_frustum() only accepts non-negative values''' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Surface area of a cylinder: 2*pi*r*(h + r)."""
    if radius < 0 or height < 0:
        raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a (ring) torus: 4*pi^2*R*r."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('''surface_area_torus() only accepts non-negative values''' )
    if torus_radius < tube_radius:
        raise ValueError(
            '''surface_area_torus() does not support spindle or self intersecting tori''' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError('''area_rectangle() only accepts non-negative values''' )
    return length * width


def area_square(side_length: float) -> float:
    """Area of a square."""
    if side_length < 0:
        raise ValueError('''area_square() only accepts non-negative values''' )
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Area of a triangle from base and height."""
    if base < 0 or height < 0:
        raise ValueError('''area_triangle() only accepts non-negative values''' )
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Area of a triangle from its three sides (Heron's formula)."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('''Given three sides do not form a triangle''' )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError('''area_parallelogram() only accepts non-negative values''' )
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Area of a trapezium: (b1 + b2) * h / 2."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('''area_trapezium() only accepts non-negative values''' )
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Area of a circle."""
    if radius < 0:
        raise ValueError('''area_circle() only accepts non-negative values''' )
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Area of an ellipse with the given semi-axes."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError('''area_ellipse() only accepts non-negative values''' )
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Area of a rhombus from its diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('''area_rhombus() only accepts non-negative values''' )
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Area of a regular polygon with ``sides`` sides of the given length."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            '''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
    elif length < 0:
        raise ValueError(
            '''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
    # The duplicated (unreachable) second return has been removed.
    return (sides * length**2) / (4 * tan(pi / sides ))


# Backward-compat: the last mangled definition bound the module name ``A__``.
A__ = area_reg_polygon
if __name__ == "__main__":
    import doctest

    # Run the embedded doctests, then print a demo table of every formula.
    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests
    print('[DEMO] Areas of various geometric shapes: \n')
    print(f'''Rectangle: {area_rectangle(1_0, 2_0) = }''')
    print(f'''Square: {area_square(1_0) = }''')
    print(f'''Triangle: {area_triangle(1_0, 1_0) = }''')
    print(f'''Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }''')
    print(f'''Parallelogram: {area_parallelogram(1_0, 2_0) = }''')
    print(f'''Rhombus: {area_rhombus(1_0, 2_0) = }''')
    print(f'''Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }''')
    print(f'''Circle: {area_circle(2_0) = }''')
    print(f'''Ellipse: {area_ellipse(1_0, 2_0) = }''')
    print('\nSurface Areas of various geometric shapes: \n')
    print(f'''Cube: {surface_area_cube(2_0) = }''')
    print(f'''Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }''')
    print(f'''Sphere: {surface_area_sphere(2_0) = }''')
    print(f'''Hemisphere: {surface_area_hemisphere(2_0) = }''')
    print(f'''Cone: {surface_area_cone(1_0, 2_0) = }''')
    print(f'''Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }''')
    print(f'''Cylinder: {surface_area_cylinder(1_0, 2_0) = }''')
    print(f'''Torus: {surface_area_torus(2_0, 1_0) = }''')
    print(f'''Equilateral Triangle: {area_reg_polygon(3, 1_0) = }''')
    print(f'''Square: {area_reg_polygon(4, 1_0) = }''')
    print(f'''Reqular Pentagon: {area_reg_polygon(5, 1_0) = }''')
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

# UnCLIP pipelines require torch and transformers >= 4.25.0; fall back to the
# dummy objects (which raise a helpful error on use) when they are missing.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 64 | 1 |
def A__ ( snake_case_ : list ):
    """Return the minimum total cost of merging all files into one file.

    Repeatedly merges the two currently-cheapest files (greedy / Huffman
    strategy); the input list is consumed in place.

    Fixes: the mangled assignments left ``temp`` and ``min_index`` unbound,
    popped by value instead of index, and appended the wrong value back.

    >>> A__([2, 4, 8, 20])
    54
    """
    files = snake_case_
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for the OPT conversion script.
lowercase_ : Optional[int] = logging.get_logger(__name__)
def A__ ( snake_case_ : List[Any] ):
    """Load a fairseq/metaseq OPT checkpoint and normalise its state dict
    (unnest, drop unused keys, rename projections, split fused QKV weights).

    NOTE(review): assignment targets were mangled to ``SCREAMING_SNAKE_CASE__``,
    so names used later (``sd``, ``keys_to_delete``, ``keys_to_rename``,
    ``keys``, ``value``, ``depth``, ``q``/``k``/``v`` and the per-projection
    key names) are unbound as written — restore before running.
    """
    SCREAMING_SNAKE_CASE__: str= torch.load(snake_case_ , map_location='''cpu''' )
    if "model" in sd.keys():
        # Some checkpoints nest the weights under a top-level "model" key.
        SCREAMING_SNAKE_CASE__: Any= torch.load(snake_case_ , map_location='''cpu''' )['''model''']
    # pop unnecessary weights
    SCREAMING_SNAKE_CASE__: List[str]= [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(snake_case_ )
    # Mapping from fairseq parameter names to HF OPT parameter names.
    SCREAMING_SNAKE_CASE__: str= {
        '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
        '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
        '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
        '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= sd.pop(snake_case_ )
    SCREAMING_SNAKE_CASE__: int= list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            SCREAMING_SNAKE_CASE__: int= sd[key]
            # We split QKV in separate Q,K,V
            SCREAMING_SNAKE_CASE__: Optional[Any]= key.replace('''.qkv_proj.''' , '''.q_proj.''' )
            SCREAMING_SNAKE_CASE__: Optional[int]= key.replace('''.qkv_proj.''' , '''.k_proj.''' )
            SCREAMING_SNAKE_CASE__: List[str]= key.replace('''.qkv_proj.''' , '''.v_proj.''' )
            SCREAMING_SNAKE_CASE__: Optional[int]= value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= torch.split(snake_case_ , depth // 3 , dim=0 )
            SCREAMING_SNAKE_CASE__: List[Any]= q
            SCREAMING_SNAKE_CASE__: Any= k
            SCREAMING_SNAKE_CASE__: Optional[Any]= v
            del sd[key]
    return sd
@torch.no_grad()
def A__ ( snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Tuple=None ):
    """Convert a metaseq OPT checkpoint into a half-precision HF OPTModel
    and save it to the given output directory.

    NOTE(review): mangled by an automated rewrite — ``load_checkpoint`` is not
    actually defined under that name (the loader above was renamed ``A__``),
    and the targets ``state_dict``/``config``/``model`` are unbound as written.
    """
    SCREAMING_SNAKE_CASE__: List[str]= load_checkpoint(snake_case_ )
    if config is not None:
        SCREAMING_SNAKE_CASE__: Any= OPTConfig.from_pretrained(snake_case_ )
    else:
        SCREAMING_SNAKE_CASE__: Optional[int]= OPTConfig()
    SCREAMING_SNAKE_CASE__: Union[str, Any]= OPTModel(snake_case_ ).half().eval()
    model.load_state_dict(snake_case_ )
    # Check results
    Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
    model.save_pretrained(snake_case_ )
if __name__ == "__main__":
    # CLI: path to the fairseq checkpoint, output dir, and optional HF config.
    lowercase_ : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--fairseq_path',
        type=str,
        help=(
            'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
            ' https://huggingface.co/models?other=opt_metasq'
        ),
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
    # NOTE(review): the targets were mangled to `lowercase_`, so `parser`,
    # `args` and `convert_opt_checkpoint` are unbound as written.
    lowercase_ : int = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 64 | 1 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# Size of the synthetic dataset used by the speed test.
# NOTE(review): the targets were mangled — names used below
# (SPEED_TEST_N_EXAMPLES, RESULTS_BASEPATH, RESULTS_FILENAME, RESULTS_FILE_PATH)
# are unbound as written.
SCREAMING_SNAKE_CASE__ : List[str] = 50_00_00
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.split(__file__)
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def __lowercase ( snake_case, **snake_case ):
    """Timed wrapper around ``Dataset.map`` (the timing comes from @get_duration).

    NOTE(review): both parameters were mangled to ``snake_case`` (duplicated
    parameter names are a SyntaxError) and the body's ``dataset`` is unbound.
    """
    __magic_name__ :str = dataset.map(**snake_case )
@get_duration
def __lowercase ( snake_case, **snake_case ):
    """Timed wrapper around ``Dataset.filter`` (timing via @get_duration).

    NOTE(review): both parameters were mangled to ``snake_case`` (duplicated
    parameter names are a SyntaxError) and the body's ``dataset`` is unbound.
    """
    __magic_name__ :str = dataset.filter(**snake_case )
def __lowercase ( ):
    """Benchmark Dataset.map / Dataset.filter across output formats and write
    the timings to a JSON results file.

    NOTE(review): assignment targets are mangled (``__magic_name__``), so the
    names used later (``times``, ``features``, ``dataset``, ``tokenizer``) are
    unbound, and the bare ``map(...)`` / ``filter(...)`` calls resolve to the
    Python builtins rather than the timed wrappers above — restore from the
    upstream benchmark before running.
    """
    __magic_name__ :Any = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        __magic_name__ :Tuple = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        __magic_name__ :Union[str, Any] = generate_example_dataset(
            os.path.join(snake_case, '''dataset.arrow''' ), snake_case, num_examples=snake_case )
        __magic_name__ :int = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''', use_fast=snake_case )

        def tokenize(snake_case ):
            return tokenizer(examples['''text'''] )

        __magic_name__ :str = map(snake_case )
        __magic_name__ :Tuple = map(snake_case, batched=snake_case )
        __magic_name__ :Optional[int] = map(snake_case, function=lambda snake_case : None, batched=snake_case )
        with dataset.formatted_as(type='''numpy''' ):
            __magic_name__ :str = map(snake_case, function=lambda snake_case : None, batched=snake_case )
        with dataset.formatted_as(type='''pandas''' ):
            __magic_name__ :Any = map(snake_case, function=lambda snake_case : None, batched=snake_case )
        with dataset.formatted_as(type='''torch''', columns='''numbers''' ):
            __magic_name__ :List[Any] = map(snake_case, function=lambda snake_case : None, batched=snake_case )
        with dataset.formatted_as(type='''tensorflow''', columns='''numbers''' ):
            __magic_name__ :Optional[Any] = map(snake_case, function=lambda snake_case : None, batched=snake_case )
        __magic_name__ :str = map(snake_case, function=snake_case, batched=snake_case )
        __magic_name__ :Tuple = filter(snake_case )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(snake_case, '''wb''' ) as f:
            f.write(json.dumps(snake_case ).encode('''utf-8''' ) )


if __name__ == "__main__": # useful to run the profiler
    benchmark_map_filter()
def A__ ( density : float , bulk_modulus : float ):
    """Speed of sound in a fluid: c = sqrt(K / rho) (Newton-Laplace).

    Fixes: both parameters were named ``snake_case_`` — duplicated parameter
    names are a SyntaxError — while the body referenced ``density`` and
    ``bulk_modulus``; those names are restored here.

    >>> A__(1.0, 4.0)
    2.0
    """
    if density <= 0:
        raise ValueError('''Impossible fluid density''' )
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''' )
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase (unittest.TestCase ):
    @slow
    def snake_case_ ( self: Any ):
        '''Integration test: TF CamemBERT base yields the expected output shape
        and hidden-state slice for a fixed French input.

        NOTE(review): the tensor variables were mangled — ``model(A_)`` and the
        later ``output``/``expected_slice`` names are unbound as written.
        '''
        __UpperCamelCase = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
        __UpperCamelCase = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]],dtype=tf.intaa,) # J'aime le camembert !"
        __UpperCamelCase = model(A_ )['last_hidden_state']
        __UpperCamelCase = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape,A_ )
        # compare the actual values for a slice.
        __UpperCamelCase = tf.convert_to_tensor(
            [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]],dtype=tf.floataa,)
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(),expected_slice.numpy(),atol=1E-4 ) )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Fix: every piece of the lazy import structure was bound to a throw-away name
# (`lowercase_`) and `_import_structure` — required by the `_LazyModule` call
# below — was never defined, so importing this module raised NameError.
# Restored the canonical transformers lazy-import pattern; declared names
# are unchanged.
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_a import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlipaConfig,
        BlipaQFormerConfig,
        BlipaVisionConfig,
    )
    from .processing_blip_a import BlipaProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_a import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipaForConditionalGeneration,
            BlipaModel,
            BlipaPreTrainedModel,
            BlipaQFormerModel,
            BlipaVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    """SamProcessor tests (PyTorch backend): save/load round-trips, feature
    parity with the bare SamImageProcessor, and mask post-processing."""

    def setUp(self):
        # Persist a default processor so from_pretrained tests can reload it.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 400x30 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        # Extra kwargs must survive the save/load round-trip.
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # Plain nested lists are rejected.
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    """SamProcessor tests (TensorFlow backend): mirrors SamProcessorTest but
    exercises the `return_tensors=\"tf\"` path of mask post-processing."""

    def setUp(self):
        # Persist a default processor so from_pretrained tests can reload it.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 400x30 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    """Cross-framework checks: the PT and TF paths of SamProcessor must
    produce numerically equivalent masks and pixel values."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 400x30 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        # Same random mask fed through both backends.
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
| 2 | import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class _lowerCamelCase ( TaskTemplate ):
    """``text-classification`` task template: maps a dataset's text/label
    columns onto the canonical ``{"text", "labels"}`` schema."""

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's
        actual ``ClassLabel`` feature for ``self.label_column``.

        Raises:
            ValueError: if the label column is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write the new schema through __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Mapping from the dataset's column names to the task's canonical names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 64 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class SCREAMING_SNAKE_CASE__ ( TypedDict ):
    """Result of a Burrows-Wheeler transform."""

    bwt_string: str  # last column of the sorted rotation matrix
    idx_original_string: int  # row index of the original string in that matrix
def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of *s*, starting with *s* itself.

    Raises:
        TypeError: if *s* is not a str.
    """
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> dict:
    """Apply the Burrows-Wheeler transform to *s*.

    Returns a dict with the transformed string (``bwt_string``) and the index
    of the original string among the sorted rotations (``idx_original_string``).

    Raises:
        TypeError: if *s* is not a str.
        ValueError: if *s* is empty.
    """
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    if not s:
        raise ValueError('The parameter s must not be empty.')
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert a Burrows-Wheeler transform.

    Repeatedly prepends the BWT column to the (re-sorted) rotation table, then
    returns the row at *idx_original_string*.

    Raises:
        TypeError: if *bwt_string* is not a str or *idx_original_string* cannot
            be cast to int.
        ValueError: if *bwt_string* is empty or the index is out of range.
    """
    if not isinstance(bwt_string, str):
        raise TypeError('The parameter bwt_string type must be str.')
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or passive'
            ' of cast to int.')
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            'The parameter idx_original_string must be lower than' ' len(bwt_string).')
    ordered_rotations = [''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        # Prepend the BWT column, then re-sort: after n passes each row holds
        # a full rotation of the original string.
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: read a string, print its BWT, then invert it.
    # NOTE(review): every assignment below binds `lowerCAmelCase`, yet the
    # expressions read `entry_msg`, `s`, `result` and `original_string` —
    # presumably the pre-mangling variable names; confirm and restore them.
    lowerCAmelCase : str = 'Provide a string that I will generate its BWT transform: '
    lowerCAmelCase : str = input(entry_msg).strip()
    lowerCAmelCase : dict = bwt_transform(s)
    print(
        f"""Burrows Wheeler transform for string '{s}' results """
        f"""in '{result['bwt_string']}'"""
    )
    lowerCAmelCase : str = reverse_bwt(result['bwt_string'], result['idx_original_string'])
    print(
        f"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """
        f"""we get original string '{original_string}'"""
    )
| 3 | import inspect
import unittest
class _lowerCamelCase ( unittest.TestCase ):
    """Sanity checks: diffusers imports cleanly, and every backend name used
    by a dummy object is present in the dependency version table."""

    def test_diffusers_import(self):
        # A failed import means the package (or one of its hard deps) is broken.
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # pip package names differ from the import names used as backends
                    if backend == "k_diffusion":
                        backend = '''k-diffusion'''
                    elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
                    assert backend in deps, f'{backend} is not in the deps table!'
| 64 | 0 |
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR gate: return 1 if either input is 1, else 0.

    >>> or_gate(0, 0)
    0
    >>> or_gate(0, 1)
    1
    """
    return int((input_1, input_2).count(1) != 0)
def test_or_gate() -> None:
    """Exhaustively verify the 2-input OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
    # Demo: print the OR-gate output for each input combination.
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 4 | import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _lowerCamelCase ( unittest.TestCase ):
    """SageMaker integration test: launches a smdistributed model-parallel
    training job and asserts runtime/metric thresholds from ``self.results``
    (attributes such as ``framework``/``script``/``results`` are injected by
    the ``@parameterized_class`` decorator above)."""

    def setUp(self):
        # Stage the example script into the test path before each run.
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}',
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json', "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 64 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Module logger; referenced as `logger` throughout this script.
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    """Fraction of predictions equal to labels (expects NumPy arrays)."""
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    """Fine-tune and evaluate a multiple-choice model; returns eval metrics."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            """ --overwrite_output_dir to overcome.""" )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s -   %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(""".ckpt""" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, """eval_results.txt""")
        if trainer.is_world_master():
            with open(output_eval_file, """w""") as writer:
                logger.info("""***** Eval results *****""")
                for key, value in result.items():
                    logger.info("""  %s = %s""", key, value)
                    writer.write("""%s = %s\n""" % (key, value))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs): per-process entry point; `index` is required by
    # the spawn API but unused here.
    main()
if __name__ == "__main__":
    # Standard CLI entry point.
    main()
| 5 | import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
    """Nightly integration test for the legacy ONNX Stable Diffusion inpaint
    pipeline, compared against a stored reference image."""

    @property
    def gpu_provider(self):
        # ONNX Runtime CUDA provider with a capped arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
        mask_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''',
            revision='''onnx''',
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = '''A red cat sitting on a park bench'''

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type='''np''',
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # Pixel-wise tolerance against the stored reference output.
        assert np.abs(expected_image - image).max() < 1e-2
| 64 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

# Download URLs for each pretrained checkpoint's vocabulary files.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
        'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
        ),
    },
    'merges_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
        'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
        ),
    },
    'tokenizer_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
        'roberta-base-openai-detector': (
            'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
        ),
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
        ),
    },
}

# Maximum input length (in tokens) each checkpoint supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'roberta-base': 512,
    'roberta-large': 512,
    'roberta-large-mnli': 512,
    'distilroberta-base': 512,
    'roberta-base-openai-detector': 512,
    'roberta-large-openai-detector': 512,
}
class UpperCamelCase_ ( UpperCamelCase__ ):
    """Fast (tokenizers-backed) RoBERTa tokenizer: byte-level BPE with
    ``add_prefix_space`` / ``trim_offsets`` handling."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if its serialized add_prefix_space disagrees
        # with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("""add_prefix_space""", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("""type"""))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["""sep"""])
            if "cls" in state:
                state["cls"] = tuple(state["""cls"""])

            changes_to_apply = False

            if state.get("""add_prefix_space""", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("""trim_offsets""", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("""type"""))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token, or None (with an error log) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word: it absorbs the preceding space.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("""is_split_into_words""", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """RoBERTa does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]


from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase_ : List[Any] = logging.get_logger(__name__)
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
    """PyTorch-specific benchmark arguments.

    Extends the framework-agnostic benchmark-arguments base class (aliased
    here as ``UpperCamelCase_``) with torch switches and cached device setup.

    Fixes in this revision: ``positive_arg`` and every ``self.*`` assignment
    had been mangled into throwaway locals (NameError on first use), the three
    dataclass fields all shared one name, and all six properties shared one
    name (each shadowing the previous) while the code reads
    ``self._setup_devices`` / ``self.n_gpu`` — the original descriptive names
    are restored.
    """

    # Deprecated negative CLI flags; each maps to the positive attribute
    # obtained by stripping the leading "no_" (read by __init__ below).
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__( self , **kwargs ) -> None:
        """Translate deprecated ``no_*`` kwargs into their positive
        counterparts, pop the torch-specific options, then delegate the rest
        to the base dataclass ``__init__``."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # drop the "no_" prefix
                # Deprecated flags are negations of the positive attribute.
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}' )
        self.torchscript = kwargs.pop('''torchscript''' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        self.fpaa_opt_level = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
        super().__init__(**kwargs )

    torchscript: bool = field(default=False , metadata={"help": "Trace the models using torchscript"} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
    fpaa_opt_level: str = field(
        default="O1" ,
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        } , )

    @cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        """Resolve the torch device and GPU count once and cache the result."""
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            device = torch.device('''cpu''' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu( self ) -> bool:
        """True when a TPU backend is importable and TPU use was requested."""
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx( self ) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device( self ) -> "torch.device":
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]

    @property
    def n_gpu( self ) -> int:
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]

    @property
    def is_gpu( self ) -> bool:
        return self.n_gpu > 0
# --- dataset-row separator (extraction artifact; original columns: code_codestyle) ---
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: submodule name -> list of public symbols it defines.
a = {
    '''configuration_mask2former''': [
        '''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''Mask2FormerConfig''',
    ],
}
# Canonical name used by the _LazyModule call below; ``a`` is kept as the
# original alias. Previously ``_import_structure`` was never defined
# (NameError) and the optional branches rebound ``a``, clobbering the dict.
_import_structure = a

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the vision-only symbols under their submodule key instead of
    # replacing the whole structure.
    _import_structure['''image_processing_mask2former'''] = ['''Mask2FormerImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_mask2former'''] = [
        '''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''Mask2FormerForUniversalSegmentation''',
        '''Mask2FormerModel''',
        '''Mask2FormerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # NOTE(review): these imports use the mangled "maskaformer"/"MaskaFormer"
    # spelling while the structure above declares "Mask2Former" symbols —
    # confirm the submodule names against the upstream package.
    from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_maskaformer import MaskaFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskaformer import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskaFormerForUniversalSegmentation,
            MaskaFormerModel,
            MaskaFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so symbols import on first access
    # (the previous text dropped the ``sys.modules`` assignment).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    """Fixture holding PoolFormerImageProcessor settings for the tests below.

    Fixes in this revision: the class is renamed from the mangled
    ``_lowerCamelCase`` to the name the sibling test class actually
    instantiates (two classes shared the mangled name, so the first was
    shadowed anyway); the constructor's twelve parameters all shared one name
    (a SyntaxError) and every value was bound to a throwaway local, so no
    state was ever stored; mutable list defaults are replaced with ``None``
    sentinels.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ) -> None:
        # Fall back to the processor's documented defaults when unspecified.
        self.size = size if size is not None else {'''shortest_edge''': 30}
        self.crop_size = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.crop_pct = crop_pct
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self) -> dict:
        """Return kwargs for constructing the image processor under test."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """Unit tests for PoolFormerImageProcessor (PIL / numpy / torch inputs).

    NOTE(review): every method shares the mangled name ``UpperCamelCase_`` —
    later definitions shadow earlier ones, so only the last survives on the
    class; ``setUp`` also binds the tester to a throwaway local while other
    methods read ``self.image_processor_tester``. Preserved byte-for-byte and
    only annotated; reconcile names against the upstream test file.
    """

    # Processor class under test; None when vision deps are unavailable.
    __a = PoolFormerImageProcessor if is_vision_available() else None

    def UpperCamelCase_ ( self ) -> List[Any]:
        """setUp: build the settings fixture (result should land on self)."""
        SCREAMING_SNAKE_CASE__: Any= PoolFormerImageProcessingTester(self )

    @property
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        """Kwargs dict used to construct the processor in each test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCamelCase_ ( self ) -> Dict:
        """The processor exposes every configurable property."""
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase , '''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''crop_pct''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )

    def UpperCamelCase_ ( self ) -> Tuple:
        """from_dict honors defaults and explicit size/crop_size overrides."""
        SCREAMING_SNAKE_CASE__: Any= self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
        self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
        # Integer overrides are normalized into the dict forms above.
        SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )

    def UpperCamelCase_ ( self ) -> Tuple:
        """Placeholder (upstream: test_init_without_params)."""
        pass

    def UpperCamelCase_ ( self ) -> Optional[int]:
        """PIL inputs: single image and batch both resize to crop_size."""
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , Image.Image )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Dict= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCamelCase_ ( self ) -> Dict:
        """numpy inputs: same shape expectations as the PIL case."""
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , np.ndarray )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: List[Any]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def UpperCamelCase_ ( self ) -> int:
        """torch tensor inputs: same shape expectations as the PIL case."""
        # Initialize image_processing
        SCREAMING_SNAKE_CASE__: List[Any]= self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        SCREAMING_SNAKE_CASE__: Any= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase , torch.Tensor )
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
# --- dataset-row separator (extraction artifact; original columns: code_codestyle) ---
'''simple docstring'''
import math
class Graph:
    """Dense directed graph on nodes 0..n-1 with Floyd–Warshall shortest paths.

    Fixes in this revision: ``add_edge`` stored the weight in a throwaway
    local (the graph was never populated), ``__init__`` did the same with
    ``n`` and both matrices, all three methods shared one mangled name (each
    shadowing the previous), the self-distance diagonal was never zeroed, and
    the driver below referenced ``Graph``/``graph`` names that did not exist.
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]
        # A node is at distance 0 from itself.
        for i in range(0, n):
            self.dp[i][i] = 0

    def add_edge(self, u, v, w):
        """Add a directed edge ``u -> v`` with weight ``w``."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every pair through every intermediate node k — O(n^3)."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the computed minimum distance from ``u`` to ``v``."""
        return self.dp[u][v]


# Backward-compatible alias for the previous (mangled) class name.
SCREAMING_SNAKE_CASE = Graph

if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)

# (extraction artifact removed; this import belongs to the next snippet)
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowercase_ : Tuple = 3
def primitive_root(p_val):
    """Return a (probable) primitive root modulo the prime ``p_val``.

    Candidates are drawn uniformly from [3, p_val) and rejected by the two
    cheap screens the original used: g**2 ≡ 1 (mod p) and g**p ≡ 1 (mod p).

    Fixed in this revision: all four functions in this script were defined
    under the same mangled name ``A__`` (mutually shadowing) while the call
    sites below use the original descriptive names — restored.
    """
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g


def generate_key(key_size):
    """Generate an ElGamal key pair of ``key_size`` bits.

    Returns ``(public_key, private_key)`` where public = (key_size, g, g^-d, p)
    and private = (key_size, d).
    """
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_a = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_a_inv = cryptomath.find_mod_inverse(pow(e_a , d , p ) , p )
    public_key = (key_size, e_a, e_a_inv, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name, key_size):
    """Write ``<name>_pubkey.txt`` / ``<name>_privkey.txt``; abort via
    ``sys.exit`` if either file already exists (never overwrite keys)."""
    if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
        print('''\nWARNING:''' )
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(F'\nWriting public key to file {name}_pubkey.txt...' )
    with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
        fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
    print(F'Writing private key to file {name}_privkey.txt...' )
    with open(F'{name}_privkey.txt' , '''w''' ) as fo:
        fo.write(F'{private_key[0]},{private_key[1]}' )


def main():
    """Entry point: generate 2048-bit ElGamal key files named ``elgamal_*``."""
    print('''Making key files...''' )
    make_key_files('''elgamal''' , 2_048 )
    print('''Key files generation successful''' )


if __name__ == "__main__":
    main()
# --- dataset-row separator (extraction artifact; original columns: code_codestyle) ---
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __lowerCAmelCase :
    """Fixture that builds tiny TransfoXL configs/inputs for the TF tests below.

    NOTE(review): throughout this class every local/attribute assignment was
    mangled into the throwaway name ``A__`` — e.g. the constructor stores
    nothing on ``self`` while later methods read ``self.batch_size`` etc. —
    and all methods share the mangled name ``_a`` while callers in the test
    class use the original descriptive names (``prepare_config_and_inputs``,
    ``set_seed``, ...). Preserved byte-for-byte and only annotated; this block
    cannot run as written and must be reconciled with the upstream test file.
    """

    def __init__( self : List[str] , _snake_case : Any , ):
        """Record the parent test case and tiny model hyper-parameters.

        NOTE(review): the body reads ``parent`` but the parameter is named
        ``_snake_case``; the constants below look like (in upstream order)
        batch_size, seq_length, mem_len, key_length, clamp_len, flags,
        vocab_size, cutoffs, sizes, etc. — confirm the mapping upstream.
        """
        A__ = parent
        A__ = 13
        A__ = 7
        A__ = 30
        A__ = self.seq_length + self.mem_len
        A__ = 15
        A__ = True
        A__ = True
        A__ = 99
        A__ = [10, 50, 80]
        A__ = 32
        A__ = 32
        A__ = 4
        A__ = 8
        A__ = 1_28
        A__ = 2
        A__ = 2
        A__ = None
        A__ = 1
        A__ = 0
        A__ = 3
        A__ = self.vocab_size - 1
        A__ = 0.01

    def _a ( self : int ):
        """Build (config, input_ids_a, input_ids_a, lm_labels) for one test run."""
        A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_a, input_ids_a, lm_labels)

    def _a ( self : Tuple ):
        """Seed python and TF RNGs for reproducible checks."""
        random.seed(self.seed )
        tf.random.set_seed(self.seed )

    def _a ( self : int , _snake_case : List[str] , _snake_case : Tuple , _snake_case : str , _snake_case : Union[str, Any] ):
        """Run the base model twice (fresh / with mems) and check output shapes.

        NOTE(review): duplicate ``_snake_case`` parameter names (SyntaxError).
        """
        A__ = TFTransfoXLModel(_snake_case )
        A__ , A__ = model(_snake_case ).to_tuple()
        A__ = {'input_ids': input_ids_a, 'mems': mems_a}
        A__ , A__ = model(_snake_case ).to_tuple()
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def _a ( self : int , _snake_case : Dict , _snake_case : Dict , _snake_case : Dict , _snake_case : Tuple ):
        """Run the LM-head model with/without labels and mems; check logits/mems shapes.

        NOTE(review): duplicate ``_snake_case`` parameter names (SyntaxError).
        """
        A__ = TFTransfoXLLMHeadModel(_snake_case )
        A__ , A__ = model(_snake_case ).to_tuple()
        A__ = {'input_ids': input_ids_a, 'labels': lm_labels}
        A__ , A__ = model(_snake_case ).to_tuple()
        A__ , A__ = model([input_ids_a, mems_a] ).to_tuple()
        A__ = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
        A__ , A__ = model(_snake_case ).to_tuple()
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )

    def _a ( self : Optional[Any] , _snake_case : Tuple , _snake_case : Dict , _snake_case : int , _snake_case : Tuple ):
        """Run the sequence-classification head and check the logits shape.

        NOTE(review): duplicate ``_snake_case`` parameter names (SyntaxError).
        """
        A__ = TFTransfoXLForSequenceClassification(_snake_case )
        A__ = model(_snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _a ( self : Optional[Any] ):
        """Return (config, inputs_dict) as expected by the common test mixin."""
        A__ = self.prepare_config_and_inputs()
        ((A__) , (A__) , (A__) , (A__)) = config_and_inputs
        A__ = {'input_ids': input_ids_a}
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Common + pipeline test harness for the TF TransfoXL model family.

    NOTE(review): this class reuses the mangled name ``__lowerCAmelCase`` of
    the tester class above (shadowing it), its methods all share the mangled
    name ``_a``, and several signatures repeat the parameter name
    ``_snake_case`` (a SyntaxError). Preserved byte-for-byte and annotated;
    reconcile names against the upstream test file.
    """

    # Model classes exercised by the common tests (empty without TF).
    A__ : Optional[Any] = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    A__ : str = () if is_tf_available() else ()
    # task name -> model class mapping for the pipeline tests.
    A__ : Optional[int] = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    A__ : List[str] = False
    A__ : int = False
    A__ : Tuple = False
    A__ : Dict = False

    def _a ( self : Dict , _snake_case : List[Any] , _snake_case : int , _snake_case : Dict , _snake_case : List[str] , _snake_case : str ):
        """Skip text-generation pipeline tests (no simple tokenizer exists).

        NOTE(review): duplicate ``_snake_case`` parameter names (SyntaxError);
        the body also reads ``pipeline_test_casse_name``, which is not bound here.
        """
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def _a ( self : Union[str, Any] ):
        """setUp: build the model tester and a ConfigTester (results should land on self)."""
        A__ = TFTransfoXLModelTester(self )
        A__ = ConfigTester(self , config_class=_snake_case , d_embed=37 )

    def _a ( self : Tuple ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _a ( self : Any ):
        """Base-model forward-pass shape checks (seeded)."""
        self.model_tester.set_seed()
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*_snake_case )

    def _a ( self : Dict ):
        """LM-head forward-pass shape checks (seeded)."""
        self.model_tester.set_seed()
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*_snake_case )

    def _a ( self : int ):
        """Sequence-classification head shape checks."""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_snake_case )

    def _a ( self : Union[str, Any] ):
        """Embedding layers: input embeddings always exist; output embeddings
        only for the classification model (others return None)."""
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        A__ = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            A__ = model_class(_snake_case )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                A__ = model.get_output_embeddings()
                assert isinstance(_snake_case , tf.keras.layers.Layer )
                A__ = model.get_bias()
                assert name is None
            else:
                A__ = model.get_output_embeddings()
                assert x is None
                A__ = model.get_bias()
                assert name is None

    def _a ( self : Union[str, Any] ):
        """Placeholder override (upstream skips resize-embeddings here)."""
        pass

    @slow
    def _a ( self : str ):
        """Smoke test: the pretrained checkpoint loads."""
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ = TFTransfoXLModel.from_pretrained(_snake_case )
            self.assertIsNotNone(_snake_case )

    @unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
    def _a ( self : Union[str, Any] ):
        """Skipped: keras fit() incompatibility (see skip reason)."""
        pass
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: greedy generation from the transfo-xl-wt103
    checkpoint must reproduce a pinned token sequence.

    NOTE(review): the method body binds everything to the throwaway ``A__``
    and passes the unbound name ``_snake_case`` to ``generate`` — renaming
    damage; preserved byte-for-byte and annotated.
    """

    @unittest.skip('Skip test until #12651 is resolved.' )
    @slow
    def _a ( self : int ):
        """Generate 200 tokens greedily and compare against the expected ids."""
        A__ = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
        # fmt: off
        A__ = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        A__ = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        A__ = model.generate(_snake_case , max_length=2_00 , do_sample=_snake_case )
        self.assertListEqual(output_ids[0].numpy().tolist() , _snake_case )
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return the binomial coefficient C(n, k) = n! / (k! * (n - k)!).

    Fixed in this revision: the function was defined under the mangled name
    ``A__`` while every call site below uses ``combinations`` (NameError).

    Raises:
        ValueError: if ``k`` is negative or larger than ``n`` (either case
        would require the factorial of a negative number).
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    # Integer division is exact here: k!*(n-k)! always divides n!.
    return factorial(n ) // (factorial(k ) * factorial(n - k ))


print(
    'The number of five-card hands possible from a standard',
    f'''fifty-two card deck is: {combinations(5_2, 5)}\n''',
)

print(
    'If a class of 40 students must be arranged into groups of',
    f'''4 for group projects, there are {combinations(4_0, 4)} ways''',
    'to arrange them.\n',
)

print(
    'If 10 teams are competing in a Formula One race, there',
    f'''are {combinations(1_0, 3)} ways that first, second and''',
    'third place can be awarded.',
)
# --- dataset-row separator (extraction artifact; original columns: code_codestyle) ---
from __future__ import annotations
_lowerCAmelCase = 8.988E9 # units = N * m^s * C^-2
def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ):
_UpperCamelCase = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
_UpperCamelCase = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
_UpperCamelCase = abs(__snake_case ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
_UpperCamelCase = abs(__snake_case ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
_UpperCamelCase = (COULOMBS_CONSTANT * charge_product / abs(__snake_case )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Module-wide RNG shared by the fixture helpers below. Previously the mangled
# assignment bound it only to ``lowercase_`` while the helpers read
# ``global_rng`` (NameError); the mangled name is kept as an alias.
global_rng = random.Random()
lowercase_ = global_rng

if is_torch_available():
    import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Return a ``shape[0] x shape[1]`` nested list of floats in [0, scale).

    Used as a random-audio fixture by the tests below (which call it as
    ``floats_list``; the previous mangled definition was named ``A__`` and
    bound the ``rng is None`` fallback to a throwaway local, then
    dereferenced ``None``).

    Args:
        shape: pair ``(rows, cols)``; only the first two entries are used.
        scale: multiplier applied to each uniform sample.
        rng: optional ``random.Random``; defaults to the module-wide RNG,
            created lazily if the module has not defined it.
        name: unused; kept for call-site compatibility.
    """
    if rng is None:
        # Fall back to the shared module-level RNG so fixtures are seedable
        # in one place; create it on first use if missing.
        rng = globals().setdefault("global_rng", random.Random())

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


# Backward-compatible alias for the previous (mangled) function name.
A__ = floats_list
class _lowerCamelCase ( unittest.TestCase ):
    """Fixture holding ASTFeatureExtractor settings for the tests below.

    NOTE(review): the constructor's parameters all share the name
    ``lowerCAmelCase`` (a SyntaxError) and every value is bound to a
    throwaway local instead of ``self`` — renaming damage; preserved
    byte-for-byte and annotated. The expected attributes (read by
    ``prepare_feat_extract_dict`` below and the tests) are: parent,
    batch_size, min_seq_length, max_seq_length, seq_length_diff,
    feature_size, padding_value, sampling_rate, return_attention_mask,
    do_normalize.
    """

    def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=400 , lowerCAmelCase=2000 , lowerCAmelCase=1 , lowerCAmelCase=0.0 , lowerCAmelCase=16000 , lowerCAmelCase=True , lowerCAmelCase=True , ) -> List[str]:
        SCREAMING_SNAKE_CASE__: Optional[Any]= parent
        SCREAMING_SNAKE_CASE__: Dict= batch_size
        SCREAMING_SNAKE_CASE__: Optional[int]= min_seq_length
        SCREAMING_SNAKE_CASE__: Dict= max_seq_length
        # Step between consecutive per-example lengths in a ragged batch.
        SCREAMING_SNAKE_CASE__: Optional[Any]= (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        SCREAMING_SNAKE_CASE__: Dict= feature_size
        SCREAMING_SNAKE_CASE__: str= padding_value
        SCREAMING_SNAKE_CASE__: Dict= sampling_rate
        SCREAMING_SNAKE_CASE__: List[str]= return_attention_mask
        SCREAMING_SNAKE_CASE__: str= do_normalize

    def UpperCamelCase_ ( self ) -> Optional[Any]:
        """Return kwargs for constructing the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def UpperCamelCase_ ( self , lowerCAmelCase=False , lowerCAmelCase=False ) -> Dict:
        """Build a batch of float "speech" inputs — equal-length or ragged,
        optionally converted to numpy arrays.

        NOTE(review): duplicate ``lowerCAmelCase`` parameter names; upstream
        these are ``(equal_length=False, numpify=False)``.
        """
        def _flatten(lowerCAmelCase ):
            return list(itertools.chain(*lowerCAmelCase ) )

        if equal_length:
            SCREAMING_SNAKE_CASE__: int= floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            SCREAMING_SNAKE_CASE__: int= [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _lowerCamelCase ( UpperCamelCase_ , unittest.TestCase ):
    """Unit tests for ASTFeatureExtractor: call/batch equivalence, dtype
    handling, and an integration check against pinned filterbank values.

    NOTE(review): this class reuses the mangled name ``_lowerCamelCase`` of
    the tester above (shadowing it) and its methods all share the mangled
    name ``UpperCamelCase_``; ``setUp`` binds the tester to a throwaway local
    while other methods read ``self.feat_extract_tester``. Preserved
    byte-for-byte and annotated.
    """

    # Feature-extractor class under test.
    __a = ASTFeatureExtractor

    def UpperCamelCase_ ( self ) -> int:
        """setUp: build the settings fixture (result should land on self)."""
        SCREAMING_SNAKE_CASE__: List[Any]= ASTFeatureExtractionTester(self )

    def UpperCamelCase_ ( self ) -> Any:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        SCREAMING_SNAKE_CASE__: Optional[int]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        SCREAMING_SNAKE_CASE__: Dict= [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        SCREAMING_SNAKE_CASE__: int= [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs]
        # Test not batched input
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test batched
        SCREAMING_SNAKE_CASE__: Tuple= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Union[str, Any]= feat_extract(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        SCREAMING_SNAKE_CASE__: Optional[int]= [floats_list((1, x) )[0] for x in (800, 800, 800)]
        SCREAMING_SNAKE_CASE__: List[Any]= np.asarray(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Optional[int]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        SCREAMING_SNAKE_CASE__: Optional[Any]= feat_extract(lowerCAmelCase , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ):
            self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )

    @require_torch
    def UpperCamelCase_ ( self ) -> Dict:
        """Padding returns float32 for both numpy and torch tensor outputs."""
        import torch

        SCREAMING_SNAKE_CASE__: Optional[Any]= self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        SCREAMING_SNAKE_CASE__: List[str]= np.random.rand(100 ).astype(np.floataa )
        SCREAMING_SNAKE_CASE__: Optional[Any]= np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            SCREAMING_SNAKE_CASE__: Optional[Any]= feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[int]:
        """Load the first ``lowerCAmelCase`` librispeech validation samples."""
        from datasets import load_dataset

        SCREAMING_SNAKE_CASE__: Optional[int]= load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        SCREAMING_SNAKE_CASE__: Dict= ds.sort('''id''' ).select(range(lowerCAmelCase ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]

    @require_torch
    def UpperCamelCase_ ( self ) -> str:
        """Integration: extracted values match the pinned reference slice."""
        # fmt: off
        SCREAMING_SNAKE_CASE__: str= torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on
        SCREAMING_SNAKE_CASE__: Any= self._load_datasamples(1 )
        SCREAMING_SNAKE_CASE__: Tuple= ASTFeatureExtractor()
        SCREAMING_SNAKE_CASE__: str= feature_extractor(lowerCAmelCase , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) )
| 64 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __A :
    """Undirected weighted graph with a Boruvka-style minimum-spanning-tree routine.

    NOTE(review): a mechanical rename has damaged this class. Every method is named
    ``a__`` (each definition overrides the previous), several declare duplicate
    parameter names ``A`` (a SyntaxError), and assignments bind the throwaway name
    ``_a`` while later code reads ``self.m_num_of_nodes``, ``self.m_edges``,
    ``component_size``, ``minimum_weight_edge``, etc. The documentation below
    describes the *intended* behavior reconstructed from the reads; confirm
    against the upstream source.
    """

    def __init__(self , A ) -> None:
        """Create a graph with ``A`` nodes, no edges, and an empty component map."""
        _a = num_of_nodes
        _a = []
        _a = {}

    def a__ (self , A , A , A ) -> None:
        """Append an undirected edge as ``[u, v, weight]``."""
        self.m_edges.append([u_node, v_node, weight] )

    def a__ (self , A ) -> int:
        """Follow parent pointers to the representative of ``u_node``'s component."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )

    def a__ (self , A ) -> None:
        """Refresh the cached representative for every node (unless already a root)."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                _a = self.find_component(A )

    def a__ (self , A , A , A ) -> None:
        """Union by size: attach the smaller component under the larger one and
        update the size table accordingly."""
        if component_size[u_node] <= component_size[v_node]:
            _a = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(A )
        elif component_size[u_node] >= component_size[v_node]:
            _a = self.find_component(A )
            component_size[u_node] += component_size[v_node]
            self.set_component(A )

    def a__ (self ) -> None:
        """Print the MST edges and total weight.

        Boruvka scheme: while more than one component remains, find each
        component's minimum-weight outgoing edge, then union across all of them.
        """
        _a = []
        _a = 0
        _a = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        _a = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                _a , _a , _a = edge
                _a = self.m_component[u]
                _a = self.m_component[v]
                if u_component != v_component:
                    # Track the lightest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            _a = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(A , A ):
                    _a , _a , _a = edge
                    _a = self.m_component[u]
                    _a = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(A , A , A )
                        print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
                        num_of_components -= 1
            _a = [-1] * self.m_num_of_nodes
        print(f'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def lowerCAmelCase ():
    """Placeholder test hook — performs no checks and returns ``None``."""
    return None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 11 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Lazy-import table for the Speech2Text model: heavyweight submodules are only
# imported on first attribute access (or eagerly under TYPE_CHECKING).
# NOTE(review): a mechanical rename replaced the conventional `_import_structure`
# dict with `lowercase_` variables, yet the final `_LazyModule` call still passes
# the undefined name `_import_structure` — confirm against the upstream file.
lowercase_ : List[Any] = {
    'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
    'processing_speech_to_text': ['Speech2TextProcessor'],
}

# Optional components: each is registered only when its backend is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Optional[Any] = ['Speech2TextTokenizer']

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Any = ['Speech2TextFeatureExtractor']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Optional[int] = [
        'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFSpeech2TextForConditionalGeneration',
        'TFSpeech2TextModel',
        'TFSpeech2TextPreTrainedModel',
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ : Optional[int] = [
        'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Speech2TextForConditionalGeneration',
        'Speech2TextModel',
        'Speech2TextPreTrainedModel',
    ]

# Under static type checking, import everything eagerly so annotations resolve.
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy that imports on demand.
    import sys

    lowercase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)

# File names every DPR tokenizer looks for in a checkpoint directory.
lowerCamelCase__ : Dict = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

# Pretrained vocab/tokenizer URLs for the context-encoder checkpoints.
lowerCamelCase__ : Union[str, Any] = {
    """vocab_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-ctx_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-ctx_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}

# Pretrained vocab/tokenizer URLs for the question-encoder checkpoints.
lowerCamelCase__ : Tuple = {
    """vocab_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-question_encoder-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-question_encoder-multiset-base""": (
            """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}

# Pretrained vocab/tokenizer URLs for the reader checkpoints.
lowerCamelCase__ : Optional[Any] = {
    """vocab_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """facebook/dpr-reader-single-nq-base""": (
            """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
        ),
        """facebook/dpr-reader-multiset-base""": (
            """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
        ),
    },
}

# Maximum positional-embedding sizes per checkpoint (all BERT-base: 512).
lowerCamelCase__ : Tuple = {
    """facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
    """facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
lowerCamelCase__ : Optional[Any] = {
    """facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
    """facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
lowerCamelCase__ : Dict = {
    """facebook/dpr-reader-single-nq-base""": 5_1_2,
    """facebook/dpr-reader-multiset-base""": 5_1_2,
}

# Per-checkpoint init overrides (all models are lowercase BERT variants).
lowerCamelCase__ : Dict = {
    """facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCamelCase__ : Any = {
    """facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCamelCase__ : List[str] = {
    """facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
    """facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class _snake_case ( UpperCAmelCase_ ):
    """Fast (Rust-backed) tokenizer for the DPR context encoder — a
    BertTokenizerFast configured with the context-encoder checkpoint tables.

    NOTE(review): the base class `UpperCAmelCase_` and the *_PRETRAINED_* names
    below are renaming artifacts (the constants above were renamed to
    `lowerCamelCase__`); confirm against the upstream file.
    """

    __lowerCAmelCase : Optional[int] = VOCAB_FILES_NAMES
    __lowerCAmelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    __lowerCAmelCase : Any = DPRContextEncoderTokenizer  # slow-tokenizer counterpart
class _snake_case ( UpperCAmelCase_ ):
    """Fast (Rust-backed) tokenizer for the DPR question encoder — a
    BertTokenizerFast configured with the question-encoder checkpoint tables.

    NOTE(review): the base class `UpperCAmelCase_` and the *_PRETRAINED_* names
    are renaming artifacts; confirm against the upstream file.
    """

    __lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
    __lowerCAmelCase : Any = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase : List[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    __lowerCAmelCase : List[str] = DPRQuestionEncoderTokenizer  # slow-tokenizer counterpart
lowerCamelCase__ : Any = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
lowerCamelCase__ : int = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
lowerCamelCase__ : Optional[int] = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCAmelCase_ )
class _snake_case :
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
elif titles is None or texts is None:
lowercase__ : Union[str, Any] = titles if texts is None else texts
return super().__call__(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase__ : Optional[int] = titles if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else [titles]
lowercase__ : List[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else [texts]
lowercase__ : int = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = questions if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else [questions] * n_passages
assert len(SCREAMING_SNAKE_CASE_) == len(
SCREAMING_SNAKE_CASE_), f'There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE_)} titles and {len(SCREAMING_SNAKE_CASE_)} texts.'
lowercase__ : Dict = super().__call__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_)["""input_ids"""]
lowercase__ : Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_)["""input_ids"""]
lowercase__ : Union[str, Any] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
]
}
if return_attention_mask is not False:
lowercase__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
lowercase__ : List[str] = attention_mask
return self.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 64 , SCREAMING_SNAKE_CASE_ = 4 , ):
'''simple docstring'''
lowercase__ : Dict = reader_input["""input_ids"""]
lowercase__ , lowercase__ , lowercase__ : str = reader_output[:3]
lowercase__ : List[Any] = len(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = sorted(range(SCREAMING_SNAKE_CASE_) , reverse=SCREAMING_SNAKE_CASE_ , key=relevance_logits.__getitem__)
lowercase__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
lowercase__ : Dict = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
lowercase__ : Optional[Any] = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowercase__ : int = sequence_ids.index(self.pad_token_id)
else:
lowercase__ : Optional[int] = len(SCREAMING_SNAKE_CASE_)
lowercase__ : int = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE_ , top_spans=SCREAMING_SNAKE_CASE_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE_ , start_index=SCREAMING_SNAKE_CASE_ , end_index=SCREAMING_SNAKE_CASE_ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(SCREAMING_SNAKE_CASE_) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Tuple = []
for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE_):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
lowercase__ : Optional[Any] = sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_: x[1] , reverse=SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
lowercase__ : Dict = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(SCREAMING_SNAKE_CASE_) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase_ )
class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ ):
    """Fast DPR reader tokenizer: the span-decoding mixin above combined with a
    BertTokenizerFast configured with the reader checkpoint tables.

    NOTE(review): the decorator argument and both base-class names
    `UpperCAmelCase_` are renaming artifacts; confirm against upstream.
    """

    __lowerCAmelCase : int = VOCAB_FILES_NAMES
    __lowerCAmelCase : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase : Any = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase : Any = READER_PRETRAINED_INIT_CONFIGURATION
    __lowerCAmelCase : Optional[Any] = ['input_ids', 'attention_mask']
    __lowerCAmelCase : int = DPRReaderTokenizer  # slow-tokenizer counterpart
| 12 | import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A__ ( ):
    """Build and parse the CLI arguments for the image-generation script.

    NOTE(review): renaming artifacts — `parser` is never bound (the
    ArgumentParser is assigned to a placeholder), `type=snake_case_` /
    `default=snake_case_` / `required=snake_case_` reference an undefined
    module-level name (upstream presumably `str` / `None` / `True`), and
    `return args` reads an undefined name. Confirm against upstream.
    """
    SCREAMING_SNAKE_CASE__: Union[str, Any]= argparse.ArgumentParser()
    # -m: model path/id, -c: prompt, -n: image count, -s: seed, -ci: CUDA device.
    parser.add_argument(
        '''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case_ , default=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
    parser.add_argument(
        '''-c''' , '''--caption''' , type=snake_case_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
    parser.add_argument(
        '''-n''' , '''--images_num''' , type=snake_case_ , default=4 , help='''How much images to generate.''' , )
    parser.add_argument(
        '''-s''' , '''--seed''' , type=snake_case_ , default=42 , help='''Seed for random process.''' , )
    parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=snake_case_ , default=0 , help='''cuda_id.''' , )
    SCREAMING_SNAKE_CASE__: Any= parser.parse_args()
    return args
def A__ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
    """Paste `rows * cols` equally sized PIL images into a single grid image.

    NOTE(review): renaming artifacts — all three parameters share the name
    `snake_case_` (a SyntaxError as written), and the size/grid assignments bind
    placeholders while the body reads `imgs`, `rows`, `cols`, `w`, `h`, `grid`.
    Upstream signature is presumably `image_grid(imgs, rows, cols)`; confirm.
    """
    if not len(snake_case_ ) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''' )
    SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= imgs[0].size
    SCREAMING_SNAKE_CASE__: Optional[Any]= Image.new('''RGB''' , size=(cols * w, rows * h) )
    SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Union[str, Any]= grid.size
    # Row-major placement: image i goes to column i % cols, row i // cols.
    for i, img in enumerate(snake_case_ ):
        grid.paste(snake_case_ , box=(i % cols * w, i // cols * h) )
    return grid
def A__ ( snake_case_ : Tuple , snake_case_ : str="robotic cat with wings" , snake_case_ : Optional[Any]=7.5 , snake_case_ : Dict=50 , snake_case_ : Union[str, Any]=1 , snake_case_ : Tuple=42 , ):
    """Run the diffusion pipeline on one prompt and return (grid_image, images).

    NOTE(review): renaming artifacts — every parameter is named `snake_case_`
    (a SyntaxError as written) and the body reads `pipeline`,
    `num_images_per_prompt`, `image_grid` while assignments bind placeholders.
    Upstream presumably `generate_images(pipeline, prompt, guidance_scale,
    num_inference_steps, num_images_per_prompt, seed)`; confirm.
    """
    SCREAMING_SNAKE_CASE__: List[Any]= torch.Generator(pipeline.device ).manual_seed(snake_case_ )
    SCREAMING_SNAKE_CASE__: Optional[int]= pipeline(
        snake_case_ , guidance_scale=snake_case_ , num_inference_steps=snake_case_ , generator=snake_case_ , num_images_per_prompt=snake_case_ , ).images
    # Arrange the images in an approximately square grid.
    SCREAMING_SNAKE_CASE__: str= int(math.sqrt(snake_case_ ) )
    SCREAMING_SNAKE_CASE__: Optional[Any]= image_grid(snake_case_ , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
# --- Script entry: build a Stable Diffusion pipeline from the given checkpoint,
# optionally swap in a neural-compressor quantized UNet (best_model.pt), then
# generate images and save a grid plus the individual frames.
# NOTE(review): renaming artifacts — the script calls `parse_args` /
# `generate_images` (defined above as `A__`) and reads `text_encoder`, `vae`,
# `unet`, `tokenizer`, `pipeline`, `grid`, `images`, `dirname` while the
# corresponding assignments bind `lowercase_` placeholders. Confirm upstream
# before running.
lowercase_ : List[str] = parse_args()
# Load models and create wrapper for stable diffusion
lowercase_ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowercase_ : List[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowercase_ : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowercase_ : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowercase_ : Dict = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker (pass-through lambda).
lowercase_ : str = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    # Quantized UNet available: load it and splice into the pipeline.
    lowercase_ : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    lowercase_ : Any = unet.to(torch.device('cuda', args.cuda_id))
lowercase_ : str = pipeline.to(unet.device)
lowercase_ , lowercase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the grid (named after the caption) and each image under a caption dir.
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowercase_ : List[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 64 | 0 |
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def UpperCAmelCase__ ( n : int , prec : int = 10_00 ) -> bool:
    """Probabilistic Miller-Rabin primality test.

    Args:
        n: the number to test.
        prec: number of independent random rounds; a ``True`` result is prime
            with error probability at most ``4 ** -prec``.

    Returns:
        True if `n` is (probably) prime, False if it is certainly composite.

    Fixes over the original: the def declared two parameters with the same name
    (a SyntaxError), every intermediate was bound to the placeholder
    ``__lowerCamelCase`` leaving ``d``/``exp``/``count``/``b``/``flag``
    undefined, and ``d /= 2`` silently turned ``d`` into a float. The imported
    ``bin_exp_mod`` helper is replaced by the built-in three-argument ``pow``,
    which computes the same modular power.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # n is odd: write n - 1 = d * 2**exp with d odd.
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division — `/=` would make d a float
        exp += 1
    for _ in range(prec):
        a = random.randint(2, n - 1)
        b = pow(a, d, n)
        if b != 1:
            # Square up to `exp` times looking for n - 1; if it never appears,
            # `a` is a witness that n is composite.
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
    return True
if __name__ == "__main__":
    # Interactive mode: list all (probable) primes up to a user-supplied bound.
    # NOTE(review): reads `n` and calls `is_prime_big`, neither of which is
    # defined under those names here (the input is bound to `A__` and the
    # function above is `UpperCAmelCase__`) — renaming damage; confirm upstream.
    A__ : Tuple = abs(int(input("""Enter bound : """).strip()))
    print("""Here's the list of primes:""")
    print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 13 | from __future__ import annotations
from collections import deque
class _lowerCamelCase :
    """Aho-Corasick automaton: builds a trie over the given keywords, adds BFS
    failure links, and reports every occurrence of every keyword in a string.

    NOTE(review): renaming artifacts — assignments bind SCREAMING_SNAKE_CASE__
    placeholders while the code reads `current_state`, `next_state`, `q`, `r`,
    `state`, `result`, and all methods are collapsed to the name
    `UpperCamelCase_` although bodies call `add_keyword` / `find_next_state` /
    `set_fail_transitions`. Documentation below describes the reconstructed
    intent; confirm upstream.
    """

    def __init__( self , lowerCAmelCase ) -> Optional[Any]:
        # adlist: flat node table; node 0 is the root with empty value.
        SCREAMING_SNAKE_CASE__: list[dict]= []
        self.adlist.append(
            {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
        for keyword in keywords:
            self.add_keyword(lowerCAmelCase )
        self.set_fail_transitions()

    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> int | None:
        # Goto function: child of `current_state` labeled `char`, or None.
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> None:
        # Insert one keyword into the trie, creating nodes as needed; record the
        # keyword in the terminal node's output list.
        SCREAMING_SNAKE_CASE__: str= 0
        for character in keyword:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= self.find_next_state(lowerCAmelCase , lowerCAmelCase )
            if next_state is None:
                self.adlist.append(
                    {
                        '''value''': character,
                        '''next_states''': [],
                        '''fail_state''': 0,
                        '''output''': [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                SCREAMING_SNAKE_CASE__: Dict= len(self.adlist ) - 1
            else:
                SCREAMING_SNAKE_CASE__: List[Any]= next_state
        self.adlist[current_state]["output"].append(lowerCAmelCase )

    def UpperCamelCase_ ( self ) -> None:
        # BFS from the root: assign each node its failure link (longest proper
        # suffix present in the trie) and merge outputs along failure chains.
        SCREAMING_SNAKE_CASE__: deque= deque()
        for node in self.adlist[0]["next_states"]:
            q.append(lowerCAmelCase )
            SCREAMING_SNAKE_CASE__: Optional[int]= 0
        while q:
            SCREAMING_SNAKE_CASE__: Union[str, Any]= q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(lowerCAmelCase )
                SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[r]['''fail_state''']
                # Walk failure links until a state with a matching transition.
                while (
                    self.find_next_state(lowerCAmelCase , self.adlist[child]['''value'''] ) is None
                    and state != 0
                ):
                    SCREAMING_SNAKE_CASE__: Tuple= self.adlist[state]['''fail_state''']
                SCREAMING_SNAKE_CASE__: Dict= self.find_next_state(
                    lowerCAmelCase , self.adlist[child]['''value'''] )
                if self.adlist[child]["fail_state"] is None:
                    SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
                SCREAMING_SNAKE_CASE__: str= (
                    self.adlist[child]['''output''']
                    + self.adlist[self.adlist[child]['''fail_state''']]['''output''']
                )

    def UpperCamelCase_ ( self , lowerCAmelCase ) -> dict[str, list[int]]:
        SCREAMING_SNAKE_CASE__: dict= {}  # returns a dict with keywords and list of its occurrences
        SCREAMING_SNAKE_CASE__: Optional[Any]= 0
        for i in range(len(lowerCAmelCase ) ):
            # Follow failure links until a state accepts string[i] (or root).
            while (
                self.find_next_state(lowerCAmelCase , string[i] ) is None
                and current_state != 0
            ):
                SCREAMING_SNAKE_CASE__: Optional[int]= self.adlist[current_state]['''fail_state''']
            SCREAMING_SNAKE_CASE__: Optional[int]= self.find_next_state(lowerCAmelCase , string[i] )
            if next_state is None:
                SCREAMING_SNAKE_CASE__: List[Any]= 0
            else:
                SCREAMING_SNAKE_CASE__: Dict= next_state
                # Every keyword in this state's output ends at position i.
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        SCREAMING_SNAKE_CASE__: Optional[Any]= []
                    result[key].append(i - len(lowerCAmelCase ) + 1 )
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 0 |
def __UpperCAmelCase ( __a : str ) -> bool:
"""simple docstring"""
if not all(x.isalpha() for x in string ):
raise ValueError('''String must only contain alphabetic characters.''' )
_a : Optional[Any] = sorted(string.lower() )
return len(__a ) == len(set(__a ) )
if __name__ == "__main__":
a__ = input('''Enter a string ''').strip()
a__ = is_isogram(input_str)
print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 14 | import numpy as np
def A__ ( f , ya , xa , h , x_end ):
    """Solve the ODE y' = f(x, y) with the classic 4th-order Runge-Kutta method.

    Args:
        f: right-hand side, called as ``f(x, y)``.
        ya: initial value y(xa).
        xa: initial abscissa.
        h: step size.
        x_end: integrate until this x; the step count is
            ``n = ceil((x_end - xa) / h)``.

    Returns:
        numpy array of the ``n + 1`` successive y values, with ``y[0] == ya``.

    Fixes over the original: the def declared five parameters all named
    ``snake_case_`` (a SyntaxError) and bound every intermediate to a
    placeholder, leaving ``n``, ``y``, ``x`` and the four slope estimates
    undefined. Parameter names are taken from the names the body reads.
    """
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        # The four RK4 slope estimates.
        ka = f(x, y[k])
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)
        kd = f(x + h, y[k] + h * kc)
        # Weighted average advances the solution one step.
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 64 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import table for the MegatronBERT model: submodules are imported on
# first attribute access (or eagerly under TYPE_CHECKING).
# NOTE(review): a mechanical rename replaced the conventional `_import_structure`
# dict with `A`, yet the final `_LazyModule` call still passes the undefined
# name `_import_structure` — confirm against the upstream file.
A : str = {
    'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}

# Modeling code is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A : int = [
        'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegatronBertForCausalLM',
        'MegatronBertForMaskedLM',
        'MegatronBertForMultipleChoice',
        'MegatronBertForNextSentencePrediction',
        'MegatronBertForPreTraining',
        'MegatronBertForQuestionAnswering',
        'MegatronBertForSequenceClassification',
        'MegatronBertForTokenClassification',
        'MegatronBertModel',
        'MegatronBertPreTrainedModel',
    ]

# Under static type checking, import everything eagerly so annotations resolve.
if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy that imports on demand.
    import sys

    A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 15 | import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class _lowerCamelCase ( unittest.TestCase ):
    """Unit tests for `get_activation`: each name resolves to the expected nn
    module, which is probed at a few points (≈0 for large negative inputs,
    exactly 0 at 0, identity-like for large positive inputs).

    NOTE(review): renaming artifacts — the lookup result is bound to a
    placeholder while the assertions read `lowerCAmelCase` and call `act`
    (presumably the same variable upstream), and `torch.floataa` is not a real
    dtype attribute (presumably `torch.float32`). Confirm upstream.
    """

    def UpperCamelCase_ ( self ) -> List[Any]:
        # 'swish' should resolve to nn.SiLU.
        SCREAMING_SNAKE_CASE__: Tuple= get_activation('''swish''' )
        self.assertIsInstance(lowerCAmelCase , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def UpperCamelCase_ ( self ) -> int:
        # 'silu' is an alias that should also resolve to nn.SiLU.
        SCREAMING_SNAKE_CASE__: Optional[Any]= get_activation('''silu''' )
        self.assertIsInstance(lowerCAmelCase , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        # 'mish' should resolve to nn.Mish (wider negative probe: -200).
        SCREAMING_SNAKE_CASE__: Optional[int]= get_activation('''mish''' )
        self.assertIsInstance(lowerCAmelCase , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def UpperCamelCase_ ( self ) -> int:
        # 'gelu' should resolve to nn.GELU.
        SCREAMING_SNAKE_CASE__: Dict= get_activation('''gelu''' )
        self.assertIsInstance(lowerCAmelCase , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Base URL hosting the original OpenAI Jukebox checkpoints.
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"

# Checkpoint files (VQ-VAE + three prior levels) required per model variant.
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key(key):
    """Translate a single OpenAI Jukebox state-dict key to its HF name.

    Applies, in order: conv1d renames inside deep resnet stacks, the
    conditioner/prime-prior renames, embedding cleanups, and the final
    output-projection renames.  Keys that match none of the patterns are
    returned unchanged.
    """
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename every key of an original Jukebox ``state_dict`` to the HF layout.

    Regex-based renames handle the structural encoder/decoder/conditioner
    blocks; everything else falls through to :func:`replace_key`.  Keys that do
    not exist (or whose tensor shape disagrees) in ``model_state_dict`` under
    ``key_prefix`` are reported and kept under their original name.
    ``mapping`` is updated in place with new-key -> original-key entries.
    Returns the renamed dict.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original Jukebox checkpoints for ``model_name``, rename all
    weights to the HF layout, load them into a :class:`JukeboxModel`, and save
    the converted model plus the key-mapping JSON to
    ``pytorch_dump_folder_path``.  Returns the (remaining) per-prior weight
    dicts for inspection.
    """
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                # NOTE(review): non-top-level priors store the conditioner under
                # ".blocks."; renamed here so fix_jukebox_keys' regexes match — confirm.
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # dict 0 is the VQ-VAE; dicts 1..3 are priors, stored top-level-first.
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='jukebox-5b-lyrics',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='jukebox-5b-lyrics-converted',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")


class SegmentTree(Generic[T]):
    """Iterative (bottom-up) segment tree over an array of ``T``.

    Supports O(log n) point updates and O(log n) inclusive range queries
    under an arbitrary associative, commutative combine function ``fnc``
    (e.g. ``min``, ``max``, addition).
    """

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Build the tree from ``arr`` with combine function ``fnc``."""
        self.N: int = len(arr)
        # st[N:2N] are the leaves (the input array); st[1:N] are internal
        # nodes; index 0 is an unused placeholder.
        self.st: list = [None for _ in range(self.N)] + list(arr)
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """Recompute every internal node from its two children."""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set ``arr[p] = v`` and refresh all ancestors of the leaf."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> "T | None":  # noqa: E741
        """Return the fold of ``arr[l..r]`` (inclusive), or None if empty."""
        l, r = l + self.N, r + self.N
        res = None
        while l <= r:
            if l % 2 == 1:  # l is a right child: take it and move inward
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:  # r is a left child: take it and move inward
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments():
        """Check every (i, j) range query against a brute-force reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    # Re-verify after point updates.
    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
import os
from pathlib import Path
def load_cuda_kernels():
    """JIT-compile and import the MultiScaleDeformableAttention extension.

    Builds the C++/CUDA sources shipped under ``kernels/deformable_detr``
    (three directories above this file) with ``torch.utils.cpp_extension.load``
    and returns the resulting ``MultiScaleDeformableAttention`` module.
    """
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class StableDiffusionControlNetImg2ImgPipelineFastTests(
    # NOTE(review): mixin order restored from the upstream diffusers test file — confirm.
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the single-ControlNet img2img pipeline, built from
    tiny randomly initialised components."""

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny, deterministic pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        # NOTE(review): scheduler flags restored from the upstream test — confirm.
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='''scaled_linear''',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')

        components = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt / image / control-image inputs on ``device``."""
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((64, 64))

        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(),
        reason='''XFormers attention is only available with CUDA and `xformers` installed''',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    # NOTE(review): mixin pair restored from the upstream diffusers test file — confirm.
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the img2img pipeline driven by a
    :class:`MultiControlNetModel` of two ControlNets."""

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Build tiny components with two independently perturbed ControlNets."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Give conv layers deterministic, non-trivial weights so the two
            # controlnets actually influence the output.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        # NOTE(review): scheduler flags restored from the upstream test — confirm.
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='''scaled_linear''',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            '''unet''': unet,
            '''controlnet''': controlnet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic inputs with one control image per ControlNet."""
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((64, 64))

        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
            '''image''': image,
            '''control_image''': control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control_guidance_start/end settings must change the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs['''num_inference_steps'''] = steps
        inputs['''guidance_scale'''] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(),
        reason='''XFormers attention is only available with CUDA and `xformers` installed''',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration test against real pretrained checkpoints."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''')

        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''', safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device='''cpu''').manual_seed(0)
        prompt = '''evil space-punk bird'''
        control_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png'''
        ).resize((512, 512))
        image = load_image(
            '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png'''
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type='''np''',
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy'''
        )
        assert np.abs(expected_image - image).max() < 9e-2
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ):
'''simple docstring'''
_lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each timm fused ``qkv`` projection into separate q/k/v entries.

    Mutates ``state_dict`` in place: pops ``blocks.{i}.attn.qkv.{weight,bias}``
    and writes HF-style ``query``/``key``/``value`` weights and biases.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm DeiT checkpoint into the HF model structure, verify the
    logits match, and save model + image processor to `pytorch_dump_folder_path`.
    """
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # patch/image size are encoded in the timm model name, e.g. "...patch16_224"
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # the defaults of DeiTConfig already describe the base architecture
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    """Immutable scheduler state threaded through the functional Flax sampling loop."""

    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        # num_inference_steps stays None until set_timesteps() is called
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    """Output of `FlaxDDPMScheduler.step`, extending the base output with the updated state."""

    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Flax/JAX denoising diffusion probabilistic models (DDPM) scheduler
    (Ho et al., https://arxiv.org/pdf/2006.11239.pdf).

    All mutable quantities live in a `DDPMSchedulerState` that every method takes and
    returns, keeping the scheduler object itself stateless and jit-friendly.
    """

    # schedulers this implementation is config-compatible with
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self) -> bool:
        # signals to pipelines that create_state() must be called to obtain state
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        # all hyper-parameters are captured on `self.config` by @register_to_config;
        # only the computation dtype is stored directly
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        """Build the initial scheduler state with the full (descending) training timesteps."""
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        """DDPM does not rescale the model input; kept for scheduler API interchangeability."""
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        """Return a state whose timesteps are `num_inference_steps` evenly spaced steps, descending."""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # interpolate between the min and max log-variance using the model prediction
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key=None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE.

        Returns a `FlaxDDPMSchedulerOutput` (or a `(prev_sample, state)` tuple when
        `return_dict` is False).
        """
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                ' for the FlaxDDPMScheduler.'
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise (only when t > 0; the final denoising step is deterministic)
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Forward-diffuse `original_samples` to the noise levels of `timesteps`."""
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Compute the v-prediction training target for the given samples and noise."""
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
| 64 | 0 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest

from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError

from .utils import require_pil
class TypedSequenceTest(TestCase):
    """Unit tests for TypedSequence's type / try_type coercion rules."""

    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        # raw pyarrow types are rejected; a datasets feature type must be used
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        # type and try_type are mutually exclusive
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        # a hard `type` that doesn't fit the data must raise
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        # try_type silently falls back to the inferred type
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            # list casting must be disabled when an Image feature is involved
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    """Assert that *output* (a pa.Buffer or a file path) holds the canonical two-row table."""
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    """Write two rows one by one and check schema + serialized output."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        # the writer must have inferred this schema from the data
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    """The writer must serialize the provided Features into the stream's schema metadata."""
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    # the features must round-trip through the arrow schema
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    """A non-hashable key (a list) must raise InvalidKeyError when duplicates are checked."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    """Writing two rows under the same key must raise DuplicatedKeysError."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    """Distinct keys must write cleanly with duplicate checking enabled."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    """write_batch must accept a full batch and tolerate an empty one."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    """write_table must accept a pre-built pyarrow Table."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    """write_row must accept single-row pyarrow Tables."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    """The writer must also work against a file path instead of a stream."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    """Recursively unwrap (nested) pyarrow list types down to the primitive element type."""
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    """Replace the first primitive (non-list) element of an arbitrarily nested list, in place."""
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    """An explicit optimized_int_type must drive the (possibly nested) base dtype."""
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    """Known column names get a narrow int dtype, with fallback to int64 when out of range."""
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    """The writer must close its stream both on normal exit and on error."""
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    """The writer must resolve fsspec paths ("mock://...") through storage_options."""
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    """Rows written through ParquetWriter must be readable back with pq.read_table."""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    """With embed_local_files, image bytes must be inlined instead of referenced by path."""
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    """The writer must coerce non-nullable schema fields to nullable ones."""
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
def catalan_numbers(upper_limit: int) -> list:
    """Return the Catalan numbers C(0)..C(upper_limit) computed by dynamic programming.

    Raises:
        ValueError: if upper_limit is negative.
    """
    if upper_limit < 0:
        raise ValueError('''Limit for the Catalan sequence must be ≥ 0''')
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
lowercase_ : Any = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 64 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.