code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
if n == 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return 0
elif n == 2:
return 1
else:
_a : Union[str, Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __lowerCamelCase ( lowerCAmelCase_ ) -> int:
_a : Optional[int] = 0
_a : str = 2
while digits < n:
index += 1
_a : Optional[int] = len(str(fibonacci(lowerCAmelCase_ ) ) )
return index
def __lowerCamelCase ( lowerCAmelCase_ = 1000 ) -> int:
return fibonacci_digits_index(lowerCAmelCase_ )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 89 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = BarthezTokenizer
lowerCAmelCase : int = BarthezTokenizerFast
lowerCAmelCase : Dict = True
lowerCAmelCase : str = True
def __lowercase ( self : List[Any] ):
super().setUp()
_a : List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname ,legacy_format=_UpperCAmelCase )
_a : Union[str, Any] = tokenizer
def __lowercase ( self : Tuple ):
_a : Optional[Any] = '<pad>'
_a : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase )
def __lowercase ( self : str ):
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_UpperCAmelCase ) ,101122 )
def __lowercase ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def __lowercase ( self : Dict ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Dict = [0, 57, 3018, 70307, 91, 2]
_a : Dict = self.tokenizer(
_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
_a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
_a : str = self.get_tokenizer()
_a : List[str] = self.get_rust_tokenizer()
_a : Dict = 'I was born in 92000, and this is falsé.'
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_a : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
| 89 | 1 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 358 |
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def _A (__a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.inf
def set_batch_size(__a ) -> None:
nonlocal batch_size
if isinstance(__a , __a ):
SCREAMING_SNAKE_CASE_ : Tuple = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__a , __a ):
SCREAMING_SNAKE_CASE_ : int = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__a , __a ) and feature.dtype == "binary":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(__a , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__a , __a )
return None if batch_size is np.inf else batch_size
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : NestedDataStructureLike[PathLike] , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Optional[int] , ):
'''simple docstring'''
super().__init__(
lowercase_ , split=lowercase_ , features=lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ , streaming=lowercase_ , num_proc=lowercase_ , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : Any = path_or_paths if isinstance(lowercase_ , lowercase_) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE_ : Any = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Parquet(
cache_dir=lowercase_ , data_files=lowercase_ , features=lowercase_ , hash=lowercase_ , **lowercase_ , )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
if self.streaming:
SCREAMING_SNAKE_CASE_ : str = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : Dict = None
self.builder.download_and_prepare(
download_config=lowercase_ , download_mode=lowercase_ , verification_mode=lowercase_ , base_path=lowercase_ , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE_ : Any = self.builder.as_dataset(
split=self.split , verification_mode=lowercase_ , in_memory=self.keep_in_memory)
return dataset
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , lowercase_ : Dataset , lowercase_ : Union[PathLike, BinaryIO] , lowercase_ : Optional[int] = None , **lowercase_ : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = dataset
SCREAMING_SNAKE_CASE_ : Dict = path_or_buf
SCREAMING_SNAKE_CASE_ : List[Any] = batch_size or get_writer_batch_size(dataset.features)
SCREAMING_SNAKE_CASE_ : Any = parquet_writer_kwargs
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike)):
with open(self.path_or_buf , '''wb+''') as buffer:
SCREAMING_SNAKE_CASE_ : Optional[Any] = self._write(file_obj=lowercase_ , batch_size=lowercase_ , **self.parquet_writer_kwargs)
else:
SCREAMING_SNAKE_CASE_ : str = self._write(file_obj=self.path_or_buf , batch_size=lowercase_ , **self.parquet_writer_kwargs)
return written
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : BinaryIO , lowercase_ : int , **lowercase_ : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = parquet_writer_kwargs.pop('''path_or_buf''' , lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = self.dataset.features.arrow_schema
SCREAMING_SNAKE_CASE_ : Tuple = pq.ParquetWriter(lowercase_ , schema=lowercase_ , **lowercase_)
for offset in logging.tqdm(
range(0 , len(self.dataset) , lowercase_) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
SCREAMING_SNAKE_CASE_ : List[Any] = query_table(
table=self.dataset._data , key=slice(lowercase_ , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(lowercase_)
written += batch.nbytes
writer.close()
return written
| 318 | 0 |
'''simple docstring'''
import math
import qiskit
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] = 1 , UpperCAmelCase__ : List[str] = 1 , UpperCAmelCase__ : Dict = 1 ) -> Tuple:
if (
isinstance(A__ , A__ )
or isinstance(A__ , A__ )
or isinstance(A__ , A__ )
):
raise TypeError("""inputs must be integers.""" )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(A__ ) != input_a)
or (math.floor(A__ ) != input_a)
or (math.floor(A__ ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
lowercase_ : int = qiskit.QuantumRegister(4 , """qr""" )
lowercase_ : Union[str, Any] = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
lowercase_ : Optional[int] = [input_a, input_a, carry_in]
lowercase_ : Optional[Any] = qiskit.QuantumCircuit(A__ , A__ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(A__ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(A__ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(A__ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , A__ ) # measure the last two qbits
lowercase_ : Union[str, Any] = qiskit.Aer.get_backend("""aer_simulator""" )
lowercase_ : List[str] = qiskit.execute(A__ , A__ , shots=1000 )
return job.result().get_counts(A__ )
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 239 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
lowerCAmelCase__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
for attribute in key.split('''.''' ):
__lowercase = getattr(A__ , A__ )
if weight_type is not None:
__lowercase = getattr(A__ , A__ ).shape
else:
__lowercase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
__lowercase = value
elif weight_type == "weight_g":
__lowercase = value
elif weight_type == "weight_v":
__lowercase = value
elif weight_type == "bias":
__lowercase = value
else:
__lowercase = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = fairseq_model.state_dict()
__lowercase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowercase = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == '''group''' , )
__lowercase = True
else:
for key, mapped_key in MAPPING.items():
__lowercase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__lowercase = True
if "*" in mapped_key:
__lowercase = name.split(A__ )[0].split('''.''' )[-2]
__lowercase = mapped_key.replace('''*''' , A__ )
if "weight_g" in name:
__lowercase = '''weight_g'''
elif "weight_v" in name:
__lowercase = '''weight_v'''
elif "bias" in name:
__lowercase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowercase = '''weight'''
else:
__lowercase = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(F"Unused weights: {unused_weights}" )
def _A ( A__ , A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = full_name.split('''conv_layers.''' )[-1]
__lowercase = name.split('''.''' )
__lowercase = int(items[0] )
__lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
__lowercase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(A__ )
@torch.no_grad()
def _A ( A__ , A__ , A__=None , A__=None , A__=True ):
"""simple docstring"""
if config_path is not None:
__lowercase = UniSpeechSatConfig.from_pretrained(A__ )
else:
__lowercase = UniSpeechSatConfig()
__lowercase = ''''''
if is_finetuned:
__lowercase = UniSpeechSatForCTC(A__ )
else:
__lowercase = UniSpeechSatForPreTraining(A__ )
__lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__lowercase = model[0].eval()
recursively_load_weights(A__ , A__ )
hf_wavavec.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCAmelCase__ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 104 | 0 |
lowerCamelCase__ : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def UpperCAmelCase_ ( __UpperCAmelCase : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = f"a bytes-like object is required, not '{data.__class__.__name__}'"
raise TypeError(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = ''.join(bin(__UpperCAmelCase )[2:].zfill(8 ) for byte in data )
SCREAMING_SNAKE_CASE_ = len(__UpperCAmelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
SCREAMING_SNAKE_CASE_ = b'=' * ((6 - len(__UpperCAmelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__UpperCAmelCase ) % 6)
else:
SCREAMING_SNAKE_CASE_ = b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__UpperCAmelCase ) , 6 ) ).encode()
+ padding
)
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = (
'argument should be a bytes-like object or ASCII string, '
f"not '{encoded_data.__class__.__name__}'"
)
raise TypeError(__UpperCAmelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
try:
SCREAMING_SNAKE_CASE_ = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
SCREAMING_SNAKE_CASE_ = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__UpperCAmelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
SCREAMING_SNAKE_CASE_ = encoded_data[:-padding]
SCREAMING_SNAKE_CASE_ = ''.join(
bin(B64_CHARSET.index(__UpperCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
SCREAMING_SNAKE_CASE_ = ''.join(
bin(B64_CHARSET.index(__UpperCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )
SCREAMING_SNAKE_CASE_ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__UpperCAmelCase ) , 8 )
]
return bytes(__UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod() | 364 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCamelCase__ : Dict = TypeVar('T')
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> int:
return (position - 1) // 2
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> int:
return (2 * position) + 1
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> int:
return (2 * position) + 2
class lowerCamelCase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 0
def __len__( self : Optional[Any] ):
return self.elements
def __repr__( self : Optional[int] ):
return str(self.heap )
def lowerCAmelCase_ ( self : Union[str, Any] ):
# Check if the priority queue is empty
return self.elements == 0
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : T , _lowerCAmelCase : int ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
SCREAMING_SNAKE_CASE_ = self.elements
self.elements += 1
self._bubble_up(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[0]
self._bubble_down(_lowerCAmelCase )
return elem
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : T , _lowerCAmelCase : int ):
# Update the weight of the given key
SCREAMING_SNAKE_CASE_ = self.position_map[elem]
SCREAMING_SNAKE_CASE_ = (elem, weight)
if position > 0:
SCREAMING_SNAKE_CASE_ = get_parent_position(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_lowerCAmelCase )
else:
self._bubble_down(_lowerCAmelCase )
else:
self._bubble_down(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : T ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
SCREAMING_SNAKE_CASE_ = self.position_map[elem]
if curr_pos == 0:
return None
SCREAMING_SNAKE_CASE_ = get_parent_position(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[curr_pos]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_up(_lowerCAmelCase )
return None
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : T ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
SCREAMING_SNAKE_CASE_ = self.position_map[elem]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[curr_pos]
SCREAMING_SNAKE_CASE_ = get_child_left_position(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = get_child_right_position(_lowerCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[child_left_position]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_down(_lowerCAmelCase )
if child_left_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_down(_lowerCAmelCase )
else:
return None
if child_right_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_down(_lowerCAmelCase )
return None
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : int ):
# Swap the nodes at the given positions
SCREAMING_SNAKE_CASE_ = self.heap[nodea_pos][0]
SCREAMING_SNAKE_CASE_ = self.heap[nodea_pos][0]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
SCREAMING_SNAKE_CASE_ = nodea_pos
SCREAMING_SNAKE_CASE_ = nodea_pos
class lowerCamelCase_ ( Generic[T] ):
'''simple docstring'''
def __init__( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 0
def __repr__( self : Optional[int] ):
return str(self.connections )
def __len__( self : Tuple ):
return self.nodes
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : T ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
SCREAMING_SNAKE_CASE_ = {}
self.nodes += 1
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : T , _lowerCAmelCase : T , _lowerCAmelCase : int ):
# Add an edge between 2 nodes in the graph
self.add_node(_lowerCAmelCase )
self.add_node(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = weight
SCREAMING_SNAKE_CASE_ = weight
def UpperCAmelCase_ ( __UpperCAmelCase : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
SCREAMING_SNAKE_CASE_ = {node: maxsize for node in graph.connections}
SCREAMING_SNAKE_CASE_ = {node: None for node in graph.connections}
SCREAMING_SNAKE_CASE_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__UpperCAmelCase , __UpperCAmelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
SCREAMING_SNAKE_CASE_ = priority_queue.extract_min()
SCREAMING_SNAKE_CASE_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
SCREAMING_SNAKE_CASE_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCAmelCase , dist[neighbour] )
SCREAMING_SNAKE_CASE_ = node
# running prim's algorithm
while not priority_queue.is_empty():
SCREAMING_SNAKE_CASE_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
SCREAMING_SNAKE_CASE_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCAmelCase , dist[neighbour] )
SCREAMING_SNAKE_CASE_ = node
return dist, parent | 210 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
a_ = logging.get_logger(__name__)
# General docstring
a_ = 'MobileNetV1Config'
# Base docstring
a_ = 'google/mobilenet_v1_1.0_224'
a_ = [1, 1_024, 7, 7]
# Image classification docstring
a_ = 'google/mobilenet_v1_1.0_224'
a_ = 'tabby, tabby cat'
a_ = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def __lowercase ( lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int]=None ):
UpperCamelCase_ : Optional[int] = {}
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : Union[str, Any] = model.mobilenet_va
else:
UpperCamelCase_ : Tuple = model
UpperCamelCase_ : int = """MobilenetV1/Conv2d_0/"""
UpperCamelCase_ : int = backbone.conv_stem.convolution.weight
UpperCamelCase_ : int = backbone.conv_stem.normalization.bias
UpperCamelCase_ : Tuple = backbone.conv_stem.normalization.weight
UpperCamelCase_ : Any = backbone.conv_stem.normalization.running_mean
UpperCamelCase_ : List[str] = backbone.conv_stem.normalization.running_var
for i in range(13 ):
UpperCamelCase_ : Any = i + 1
UpperCamelCase_ : str = i * 2
UpperCamelCase_ : Optional[Any] = backbone.layer[pt_index]
UpperCamelCase_ : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
UpperCamelCase_ : Dict = pointer.convolution.weight
UpperCamelCase_ : int = pointer.normalization.bias
UpperCamelCase_ : Optional[int] = pointer.normalization.weight
UpperCamelCase_ : Tuple = pointer.normalization.running_mean
UpperCamelCase_ : Any = pointer.normalization.running_var
UpperCamelCase_ : int = backbone.layer[pt_index + 1]
UpperCamelCase_ : Dict = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
UpperCamelCase_ : List[Any] = pointer.convolution.weight
UpperCamelCase_ : str = pointer.normalization.bias
UpperCamelCase_ : Optional[Any] = pointer.normalization.weight
UpperCamelCase_ : Tuple = pointer.normalization.running_mean
UpperCamelCase_ : Tuple = pointer.normalization.running_var
if isinstance(_lowerCamelCase , _lowerCamelCase ):
UpperCamelCase_ : List[str] = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
UpperCamelCase_ : Optional[Any] = model.classifier.weight
UpperCamelCase_ : int = model.classifier.bias
return tf_to_pt_map
def __lowercase ( model , config , tf_checkpoint_path ):
    """Load a TensorFlow MobileNetV1 checkpoint into a PyTorch model.

    Args:
        model: the PyTorch model to populate.
        config: the model configuration (forwarded to the map builder).
        tf_checkpoint_path: path to the TF checkpoint.

    Returns:
        The populated PyTorch model.

    Raises:
        ImportError: if TensorFlow is not installed.
        ValueError: if a TF array's shape does not match its PyTorch target.
    """
    # Fix: the original signature repeated the same parameter name three times
    # (a SyntaxError) and the body referenced the undefined `_lowerCamelCase`.
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.' )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}" )
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}" )
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping" )
            continue
        array = tf_weights[name]
        # TF stores conv kernels channels-last; transpose into PyTorch layout.
        if "depthwise_weights" in name:
            logger.info('Transposing depthwise' )
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('Transposing' )
            if len(pointer.shape ) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
        logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
        pointer.data = torch.from_numpy(array)

        # Drop the variable and its optimizer slots so leftovers can be reported.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/RMSProp', None)
        tf_weights.pop(name + '/RMSProp_1', None)
        tf_weights.pop(name + '/ExponentialMovingAverage', None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
    return model
def __lowercase ( lowerCamelCase : torch.Tensor , lowerCamelCase : nn.Convad ):
UpperCamelCase_ : Tuple = features.shape[-2:]
UpperCamelCase_ : Any = conv_layer.stride
UpperCamelCase_ : Optional[Any] = conv_layer.kernel_size
if in_height % stride_height == 0:
UpperCamelCase_ : Union[str, Any] = max(kernel_height - stride_height , 0 )
else:
UpperCamelCase_ : Optional[int] = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
UpperCamelCase_ : int = max(kernel_width - stride_width , 0 )
else:
UpperCamelCase_ : Tuple = max(kernel_width - (in_width % stride_width) , 0 )
UpperCamelCase_ : Dict = pad_along_width // 2
UpperCamelCase_ : Dict = pad_along_width - pad_left
UpperCamelCase_ : Optional[Any] = pad_along_height // 2
UpperCamelCase_ : Any = pad_along_height - pad_top
UpperCamelCase_ : Dict = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_lowerCamelCase , _lowerCamelCase , 'constant' , 0.0 )
class _lowercase ( nn.Module ):
    # NOTE(review): throughout this class parameters are all named `snake_case`
    # (the duplicated names in `__init__` are a SyntaxError) while the bodies
    # read `snake_case_`, `config`, `in_channels`, etc. — mechanical renaming
    # damage. Preserved byte-for-byte; comments only.
    def __init__( self : List[Any] , snake_case : str , snake_case : Optional[int] , snake_case : Tuple , snake_case : Optional[int] , snake_case : Union[str, Any] = 1 , snake_case : Union[str, Any] = 1 , snake_case : Any = False , snake_case : Union[str, Any] = True , snake_case : List[str] = True , ) -> Union[str, Any]:
        """Conv building block: convolution + optional batch norm + optional activation."""
        super().__init__()
        UpperCamelCase_ : Any = config
        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." )
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." )
        # TF-style padding is applied dynamically in the forward pass instead.
        UpperCamelCase_ : int = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        UpperCamelCase_ : List[str] = nn.Convad(
            in_channels=snake_case_ , out_channels=snake_case_ , kernel_size=snake_case_ , stride=snake_case_ , padding=snake_case_ , groups=snake_case_ , bias=snake_case_ , padding_mode='zeros' , )
        if use_normalization:
            UpperCamelCase_ : Union[str, Any] = nn.BatchNormad(
                num_features=snake_case_ , eps=config.layer_norm_eps , momentum=0.9997 , affine=snake_case_ , track_running_stats=snake_case_ , )
        else:
            UpperCamelCase_ : Optional[int] = None
        if use_activation:
            # Activation may be given explicitly as a string, or fall back to
            # the config's hidden activation.
            if isinstance(snake_case_ , snake_case_ ):
                UpperCamelCase_ : List[Any] = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , snake_case_ ):
                UpperCamelCase_ : Optional[int] = ACTaFN[config.hidden_act]
            else:
                UpperCamelCase_ : int = config.hidden_act
        else:
            UpperCamelCase_ : Dict = None

    def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : List[str] ) -> str:
        """Forward pass: optional TF "SAME" padding -> conv -> norm -> activation."""
        if self.config.tf_padding:
            UpperCamelCase_ : int = apply_tf_padding(snake_case_ , self.convolution )
        UpperCamelCase_ : Tuple = self.convolution(snake_case_ )
        if self.normalization is not None:
            UpperCamelCase_ : Dict = self.normalization(snake_case_ )
        if self.activation is not None:
            UpperCamelCase_ : Optional[Any] = self.activation(snake_case_ )
        return features
class _lowercase ( _a ):
    """Pretrained-model base wiring config class, TF loader and weight init.

    NOTE(review): the five class attributes all share the name `lowercase`, so
    only the last assignment survives — the intended distinct names
    (config_class, load_tf_weights, ...) need restoring upstream.
    """

    lowercase = MobileNetVaConfig
    lowercase = load_tf_weights_in_mobilenet_va
    lowercase = """mobilenet_v1"""
    lowercase = """pixel_values"""
    lowercase = False

    def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case : str ) -> List[str]:
        """Initialize the weights of a single submodule `snake_case`."""
        # Fix: the body previously read the undefined names `module` and
        # `snake_case_`, and the nonexistent `nn.Convad`/`nn.BatchNormad`.
        if isinstance(snake_case , (nn.Linear, nn.Conv2d) ):
            # Normal init for projection layers; zero bias.
            snake_case.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if snake_case.bias is not None:
                snake_case.bias.data.zero_()
        elif isinstance(snake_case , nn.BatchNorm2d ):
            # Identity-like batch norm at init.
            snake_case.bias.data.zero_()
            snake_case.weight.data.fill_(1.0 )
a_ = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
a_ = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' , _a , )
class _lowercase ( _a ):
    # NOTE(review): locals are assigned to `UpperCamelCase_` but read back under
    # other names (`depth`, `out_channels`, `strides`, `hidden_states`, ...) —
    # mechanical renaming damage. Preserved byte-for-byte; comments only.
    def __init__( self : Optional[int] , snake_case : Dict , snake_case : List[str] = True ) -> List[Any]:
        """Build the backbone: stem conv followed by 13 depthwise-separable blocks."""
        super().__init__(snake_case_ )
        UpperCamelCase_ : Optional[int] = config
        UpperCamelCase_ : List[Any] = 3_2
        UpperCamelCase_ : Any = max(int(depth * config.depth_multiplier ) , config.min_depth )
        UpperCamelCase_ : List[str] = MobileNetVaConvLayer(
            snake_case_ , in_channels=config.num_channels , out_channels=snake_case_ , kernel_size=3 , stride=2 , )
        # Stride pattern of the 13 depthwise stages; stride-2 stages double depth.
        UpperCamelCase_ : Optional[int] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        UpperCamelCase_ : Tuple = nn.ModuleList()
        for i in range(1_3 ):
            UpperCamelCase_ : Tuple = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
            UpperCamelCase_ : Any = max(int(depth * config.depth_multiplier ) , config.min_depth )
            # Each stage = depthwise 3x3 conv followed by pointwise 1x1 conv.
            self.layer.append(
                MobileNetVaConvLayer(
                    snake_case_ , in_channels=snake_case_ , out_channels=snake_case_ , kernel_size=3 , stride=strides[i] , groups=snake_case_ , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    snake_case_ , in_channels=snake_case_ , out_channels=snake_case_ , kernel_size=1 , ) )
        UpperCamelCase_ : int = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : str ) -> Tuple:
        """Head pruning is not supported for MobileNetV1."""
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(snake_case_ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Dict = None , snake_case : List[str] = None , snake_case : Tuple = None , ) -> Optional[Any]:
        """Forward pass returning last hidden state (+ pooled output, hidden states)."""
        UpperCamelCase_ : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        UpperCamelCase_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values' )
        UpperCamelCase_ : Dict = self.conv_stem(snake_case_ )
        UpperCamelCase_ : Optional[Any] = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            UpperCamelCase_ : Any = layer_module(snake_case_ )
            if output_hidden_states:
                UpperCamelCase_ : Union[str, Any] = all_hidden_states + (hidden_states,)
        UpperCamelCase_ : Union[str, Any] = hidden_states
        if self.pooler is not None:
            # Pool to (batch, channels) for downstream heads.
            UpperCamelCase_ : List[Any] = torch.flatten(self.pooler(snake_case_ ) , start_dim=1 )
        else:
            UpperCamelCase_ : int = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=snake_case_ , pooler_output=snake_case_ , hidden_states=snake_case_ , )
@add_start_docstrings(
    '\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , _a , )
class _lowercase ( _a ):
    # NOTE(review): locals are assigned to `UpperCamelCase_` but read back under
    # other names (`outputs`, `logits`, `loss_fct`, ...) — renaming damage.
    # Preserved byte-for-byte; comments only.
    def __init__( self : Union[str, Any] , snake_case : Dict ) -> int:
        """Backbone + dropout + linear classification head."""
        super().__init__(snake_case_ )
        UpperCamelCase_ : Tuple = config.num_labels
        UpperCamelCase_ : List[str] = MobileNetVaModel(snake_case_ )
        UpperCamelCase_ : str = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        UpperCamelCase_ : Union[str, Any] = nn.Dropout(config.classifier_dropout_prob , inplace=snake_case_ )
        UpperCamelCase_ : Dict = nn.Linear(snake_case_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(snake_case_ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : int = None , snake_case : Any = None , snake_case : Dict = None , snake_case : List[Any] = None , ) -> Tuple:
        """Forward pass with an optional classification/regression loss."""
        UpperCamelCase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCamelCase_ : List[Any] = self.mobilenet_va(snake_case_ , output_hidden_states=snake_case_ , return_dict=snake_case_ )
        UpperCamelCase_ : Dict = outputs.pooler_output if return_dict else outputs[1]
        UpperCamelCase_ : Tuple = self.classifier(self.dropout(snake_case_ ) )
        UpperCamelCase_ : int = None
        if labels is not None:
            # Infer the problem type once from label count and dtype.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    UpperCamelCase_ : str = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    UpperCamelCase_ : Tuple = """single_label_classification"""
                else:
                    UpperCamelCase_ : Tuple = """multi_label_classification"""
            if self.config.problem_type == "regression":
                UpperCamelCase_ : Optional[int] = MSELoss()
                if self.num_labels == 1:
                    UpperCamelCase_ : int = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    UpperCamelCase_ : List[Any] = loss_fct(snake_case_ , snake_case_ )
            elif self.config.problem_type == "single_label_classification":
                UpperCamelCase_ : str = CrossEntropyLoss()
                UpperCamelCase_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                UpperCamelCase_ : Union[str, Any] = BCEWithLogitsLoss()
                UpperCamelCase_ : str = loss_fct(snake_case_ , snake_case_ )
        if not return_dict:
            UpperCamelCase_ : Optional[int] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=snake_case_ , logits=snake_case_ , hidden_states=outputs.hidden_states , )
| 175 |
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : str ) -> list[int]:
_lowerCAmelCase : List[Any] = int(_lowerCamelCase )
# Initialize Result
_lowerCAmelCase : Any = []
# Traverse through all denomination
for denomination in reversed(_lowerCamelCase ):
# Find denominations
while int(_lowerCamelCase ) >= int(_lowerCamelCase ):
total_value -= int(_lowerCamelCase )
answer.append(_lowerCamelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
    # NOTE(review): every assignment below targets the single name
    # `UpperCamelCase_`, while the reads use `denominations`, `n`, `value`,
    # `answer`, and `find_minimum_change` is never defined in this file —
    # mechanical renaming damage. Preserved byte-for-byte; comments only.
    UpperCamelCase_ = []
    UpperCamelCase_ = """0"""
    if (
        input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
        == "y"
    ):
        # Read a custom denomination list from the user.
        UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
        for i in range(0, n):
            denominations.append(int(input(F'Denomination {i}: ').strip()))
        UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
    else:
        # All denominations of Indian Currency if user does not enter
        UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
        UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
    if int(value) == 0 or int(value) < 0:
        print("""The total value cannot be zero or negative.""")
    else:
        print(F'Following is minimal change for {value}: ')
        UpperCamelCase_ = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=""" """)
| 309 | 0 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Markers and tables used by the TFDS -> `datasets` conversion below.
# NOTE(review): all four constants are assigned to the same name
# `_SCREAMING_SNAKE_CASE`, while the code later reads HIGHLIGHT_MESSAGE_PRE,
# TO_HIGHLIGHT and TO_CONVERT — renaming damage. Preserved byte-for-byte.
_SCREAMING_SNAKE_CASE : Union[str, Any] = "<<<<<<< This should probably be modified because it mentions: "
_SCREAMING_SNAKE_CASE : Optional[int] = "=======\n>>>>>>>\n"
# TFDS constructs with no direct `datasets` equivalent; flagged for manual review.
_SCREAMING_SNAKE_CASE : Optional[Any] = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]
# Regex rewrites applied in order to port TFDS source to `datasets`.
_SCREAMING_SNAKE_CASE : Dict = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]
def UpperCamelCase_( snake_case : Namespace ):
    """Factory for the `datasets-cli convert` command.

    Args:
        snake_case: parsed CLI namespace providing `tfds_path` and
            `datasets_directory`.

    Returns:
        A configured `ConvertCommand`.
    """
    # Fix: the original body read the undefined name `args` instead of the
    # parameter.
    return ConvertCommand(snake_case.tfds_path , snake_case.datasets_directory )
class _snake_case ( lowercase_ ):
    # NOTE(review): parameters were mechanically rewritten to `a__` (the
    # duplicates in `__init__` are a SyntaxError), locals are assigned to
    # `snake_case_` but read back under other names, and the registration hook
    # and `run` method share the name `lowerCAmelCase__` (the second shadows
    # the first). Preserved byte-for-byte; comments only.
    @staticmethod
    def lowerCAmelCase__ ( a__ ) -> int:
        """Register the `convert` sub-command on the given argument parser."""
        snake_case_ = parser.add_parser(
            "convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
        train_parser.add_argument(
            "--tfds_path" , type=a__ , required=a__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
        train_parser.add_argument(
            "--datasets_directory" , type=a__ , required=a__ , help="Path to the HuggingFace Datasets folder." )
        train_parser.set_defaults(func=a__ )

    def __init__( self , a__ , a__ , *a__ ) -> List[Any]:
        """Store the source/destination paths and set up a logger."""
        snake_case_ = get_logger("datasets-cli/converting" )
        snake_case_ = tfds_path
        snake_case_ = datasets_directory

    def lowerCAmelCase__ ( self ) -> Optional[Any]:
        """Convert every TFDS dataset script under `_tfds_path` into a `datasets` script."""
        # Resolve the input to a directory of scripts (or a single script).
        if os.path.isdir(self._tfds_path ):
            snake_case_ = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            snake_case_ = os.path.dirname(self._tfds_path )
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
        snake_case_ = os.path.abspath(self._datasets_directory )
        self._logger.info(F'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
        snake_case_ = []
        snake_case_ = []
        snake_case_ = {}
        if os.path.isdir(self._tfds_path ):
            snake_case_ = os.listdir(a__ )
        else:
            snake_case_ = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F'Looking at file {f_name}' )
            snake_case_ = os.path.join(a__ , a__ )
            snake_case_ = os.path.join(a__ , a__ )
            if not os.path.isfile(a__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file" )
                continue
            with open(a__ , encoding="utf-8" ) as f:
                snake_case_ = f.readlines()
            snake_case_ = []
            snake_case_ = False
            snake_case_ = False
            snake_case_ = []
            # Rewrite the script line by line.
            for line in lines:
                snake_case_ = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    snake_case_ = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    snake_case_ = ""
                    continue
                elif "from absl import logging" in out_line:
                    snake_case_ = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    snake_case_ = out_line.replace("getLogger" , "get_logger" )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Keep the line but wrap it in conflict-style markers so a
                    # human reviews it.
                    snake_case_ = True
                    snake_case_ = list(filter(lambda a__ : e in out_line , a__ ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(a__ ) + "\n" )
                    out_lines.append(a__ )
                    out_lines.append(a__ )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        snake_case_ = re.sub(a__ , a__ , a__ )
                    # Take care of saving utilities (to later move them together with main script)
                    if "tensorflow_datasets" in out_line:
                        snake_case_ = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , a__ )
                        tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
                        snake_case_ = "from . import " + match.group(1 )
                    # Check we have not forget anything
                    if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                        raise ValueError(F'Error converting {out_line.strip()}' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    snake_case_ = True
                out_lines.append(a__ )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                snake_case_ = f_name.replace(".py" , "" )
                snake_case_ = os.path.join(a__ , a__ )
                snake_case_ = os.path.join(a__ , a__ )
                os.makedirs(a__ , exist_ok=a__ )
                self._logger.info(F'Adding directory {output_dir}' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(a__ )
            if needs_manual_update:
                with_manual_update.append(a__ )
            with open(a__ , "w" , encoding="utf-8" ) as f:
                f.writelines(a__ )
            self._logger.info(F'Converted in {output_file}' )
        # Copy shared utility files next to the builder scripts that import them.
        for utils_file in utils_files:
            try:
                snake_case_ = os.path.basename(a__ )
                snake_case_ = imports_to_builder_map[f_name.replace(".py" , "" )]
                self._logger.info(F'Moving {dest_folder} to {utils_file}' )
                shutil.copy(a__ , a__ )
            except KeyError:
                self._logger.error(F'Cannot find destination folder for {utils_file}. Please copy manually.' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 356 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def UpperCamelCase_( snake_case : np.ndarray ):
    """Doolittle LU decomposition (no pivoting) of a square matrix.

    Args:
        snake_case: square 2-D array to decompose.

    Returns:
        (lower, upper): unit-lower-triangular and upper-triangular factors
        with lower @ upper == snake_case.

    Raises:
        ValueError: if the input is not square.
        ArithmeticError: if a zero pivot is encountered (no LU without pivoting).
    """
    # Fix: the original looped `range(snake_case)` over the array itself,
    # read the undefined name `table`, and unpacked the shape into a single
    # repeated name.
    rows, columns = np.shape(snake_case )
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f'{rows}x{columns} array:\n{snake_case}'
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        # Fill row i of `lower` (below the diagonal) ...
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists" )
            lower[i][j] = (snake_case[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # ... then row i of `upper` (diagonal and above).
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = snake_case[i][j] - total
    return lower, upper
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 92 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCamelCase ( lowercase , lowercase ):
    """Stores a (mean, std) pair and standardizes / de-standardizes embeddings.

    NOTE(review): both base classes share the placeholder name `lowercase` and
    all three helper methods share the name `_lowercase` (only the last bound
    one is reachable) — renaming damage inherited from upstream.
    """

    @register_to_config
    def __init__(self : Tuple , _A : int = 7_68 , ) -> str:
        super().__init__()
        # Fix: the statistics were previously assigned to discarded locals
        # instead of `self.mean`/`self.std`, which the other methods read.
        self.mean = nn.Parameter(torch.zeros(1 , _A))
        self.std = nn.Parameter(torch.ones(1 , _A))

    def _lowercase (self : List[str] , _A : Optional[Union[str, torch.device]] = None , _B : Optional[torch.dtype] = None , ) -> Tuple:
        """Move the stored statistics to the given device/dtype; returns self."""
        # Fix: the original signature declared the same parameter name twice
        # (a SyntaxError).
        self.mean = nn.Parameter(self.mean.to(_A).to(_B))
        self.std = nn.Parameter(self.std.to(_A).to(_B))
        return self

    def _lowercase (self : Union[str, Any] , _A : List[str]) -> List[str]:
        """Standardize embeddings: (x - mean) / std."""
        # Fix: the result was previously discarded and the undefined name
        # `embeds` returned.
        _A = (_A - self.mean) * 1.0 / self.std
        return _A

    def _lowercase (self : Union[str, Any] , _A : Any) -> List[Any]:
        """Undo standardization: x * std + mean."""
        _A = (_A * self.std) + self.mean
        return _A
| 172 | """simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCamelCase ( nn.Module ):
    """Nearest-neighbor 2x spatial upsample followed by a 3x3 conv (Flax)."""

    # Fix: both fields were previously declared under the single name
    # `UpperCAmelCase` (so only one survived) while the methods read
    # `self.out_channels`/`self.dtype`; `jnp.floataa` is not a real dtype.
    out_channels : int
    dtype : jnp.dtype = jnp.float32

    def setup(self : Any) -> Optional[int]:
        # Fix: flax submodules must be bound to an attribute inside `setup`;
        # the conv was previously assigned to a discarded local, and the
        # method was not named `setup` so flax never called it.
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__(self : Any , _A : Any) -> str:
        # Inputs are NHWC; double the two spatial axes.
        batch , height , width , channels = _A.shape
        _A = jax.image.resize(
            _A , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
        _A = self.conv(_A)
        return _A
class UpperCamelCase ( nn.Module ):
    """Strided 3x3 convolution that halves the spatial resolution (Flax)."""

    # Fix: both fields were previously declared under the single name
    # `UpperCAmelCase` while the methods read `self.out_channels`/`self.dtype`;
    # `jnp.floataa` is not a real dtype.
    out_channels : int
    dtype : jnp.dtype = jnp.float32

    def setup(self : Optional[Any]) -> List[Any]:
        # Fix: bind the conv to `self.conv` inside flax's `setup` hook; it was
        # previously assigned to a discarded local in a non-`setup` method.
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__(self : int , _A : str) -> Any:
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        _A = self.conv(_A)
        return _A
class UpperCamelCase ( nn.Module ):
    # NOTE(review): all five dataclass-style fields are declared under the single
    # name `UpperCAmelCase`, `setup` assigns to discarded `__snake_case` locals
    # while `__call__` reads `self.norma`/`self.conva`/`self.time_emb_proj`/...,
    # and `__call__` repeats the parameter name `_A` (a SyntaxError) —
    # mechanical renaming damage. Preserved byte-for-byte; comments only.
    UpperCAmelCase : int
    UpperCAmelCase : int = None
    UpperCAmelCase : float = 0.0
    UpperCAmelCase : bool = None
    UpperCAmelCase : jnp.dtype = jnp.floataa

    def _lowercase (self : List[str]) -> Dict:
        """Build the norm/conv/time-embedding/dropout submodules of the block."""
        __snake_case : str = self.in_channels if self.out_channels is None else self.out_channels
        __snake_case : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
        __snake_case : str = nn.Conv(
            _A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        __snake_case : Optional[int] = nn.Dense(_A , dtype=self.dtype)
        __snake_case : int = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
        __snake_case : str = nn.Dropout(self.dropout_prob)
        __snake_case : Dict = nn.Conv(
            _A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # 1x1 shortcut projection when channel counts differ.
        __snake_case : List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        __snake_case : Optional[Any] = None
        if use_nin_shortcut:
            __snake_case : List[str] = nn.Conv(
                _A , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )

    def __call__(self : List[Any] , _A : Union[str, Any] , _A : str , _A : int=True) -> Any:
        """Residual block: norm -> swish -> conv -> +time-emb -> norm -> swish -> dropout -> conv -> skip."""
        __snake_case : List[Any] = hidden_states
        __snake_case : Optional[Any] = self.norma(_A)
        __snake_case : int = nn.swish(_A)
        __snake_case : Optional[int] = self.conva(_A)
        # Broadcast the time embedding over the spatial axes.
        __snake_case : Dict = self.time_emb_proj(nn.swish(_A))
        __snake_case : List[str] = jnp.expand_dims(jnp.expand_dims(_A , 1) , 1)
        __snake_case : Any = hidden_states + temb
        __snake_case : Tuple = self.norma(_A)
        __snake_case : Dict = nn.swish(_A)
        __snake_case : Union[str, Any] = self.dropout(_A , _A)
        __snake_case : Union[str, Any] = self.conva(_A)
        if self.conv_shortcut is not None:
            __snake_case : List[Any] = self.conv_shortcut(_A)
        return hidden_states + residual
| 172 | 1 |
"""simple docstring"""
def A ( snake_case :int ) -> bool:
    """Return True if `snake_case` is a bouncy number.

    A bouncy number's digits are neither monotonically increasing nor
    monotonically decreasing (e.g. 155349).

    Raises:
        ValueError: if the argument is not an integer.
    """
    # Fix: the original type check was `isinstance(snake_case, snake_case)`,
    # which raises TypeError instead of validating the argument.
    if not isinstance(snake_case , int ):
        raise ValueError('check_bouncy() accepts only integer arguments' )
    str_n = str(snake_case )
    sorted_str_n = ''.join(sorted(str_n ) )
    # Not sorted ascending and not sorted descending => bouncy.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def A ( snake_case :float = 9_9 ) -> int:
    """Return the least number at which the proportion of bouncy numbers
    reaches `snake_case` percent (Project Euler 112).

    Raises:
        ValueError: if the percentage is not strictly between 0 and 100.
    """
    # Fix: the original read the undefined name `percent` and called the
    # undefined `check_bouncy` (the checker above is named `A` and is shadowed
    # by this definition anyway), so the check is inlined here.
    if not 0 < snake_case < 1_0_0:
        raise ValueError('solution() only accepts values from 0 to 100' )
    bouncy_num = 0
    num = 1
    while True:
        digits = str(num )
        ordered = ''.join(sorted(digits ) )
        if ordered != digits and ordered[::-1] != digits:
            bouncy_num += 1
        if (bouncy_num / num) * 1_0_0 >= snake_case:
            return num
        num += 1
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined here — the function above is named
    # `A` (renaming damage). Preserved byte-for-byte; comments only.
    from doctest import testmod

    testmod()
    print(f'''{solution(9_9)}''')
| 263 |
"""simple docstring"""
from math import factorial
# Digit -> factorial lookup table, keyed by the digit as a string.
# NOTE(review): assigned to `UpperCamelCase` but read below as
# `DIGIT_FACTORIAL`, and `List`/`Any` are not imported in this chunk —
# renaming damage. Preserved byte-for-byte.
UpperCamelCase : List[Any] = {str(d): factorial(d) for d in range(1_0)}
def A ( snake_case :int ) -> int:
    """Return the sum of the factorials of the decimal digits of `snake_case`."""
    # Fix: the original indexed the undefined name `DIGIT_FACTORIAL`; compute
    # the digit factorials directly instead.
    return sum(factorial(int(d )) for d in str(snake_case ) )
def A ( limit=None ) -> int:
    """Sum all numbers that equal the sum of the factorials of their digits
    (Project Euler 34), excluding the trivial 1 and 2.

    Args:
        limit: optional exclusive upper bound for the search; defaults to
            7 * 9! + 1, above which no number can equal its digit-factorial sum.
    """
    # Fix: the original called the undefined `sum_of_digit_factorial` (the
    # helper above is named `A` and is shadowed by this definition), so the
    # digit-factorial sum is inlined. `limit` generalizes the fixed bound.
    upper_bound = 7 * factorial(9 ) + 1 if limit is None else limit
    return sum(
        i
        for i in range(3 , upper_bound )
        if sum(factorial(int(d )) for d in str(i ) ) == i
    )
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined here — the function above is named
    # `A` (renaming damage). Preserved byte-for-byte; comments only.
    print(f'''{solution() = }''')
| 263 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): the logger and the pretrained-config archive map are both
# assigned to the same name `lowerCamelCase__`, so the second overwrites the
# first — renaming damage. Preserved byte-for-byte.
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
    """microsoft/beit-base-patch16-224-pt22k""": (
        """https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class A__ ( __magic_name__ ):
    # NOTE(review): every `__init__` parameter is named `a` (29 duplicates — a
    # SyntaxError) while the body reads the intended names (`vocab_size`,
    # `hidden_size`, ...) — mechanical renaming damage. The intended signature
    # must be restored upstream; preserved byte-for-byte, comments only.
    lowercase = 'beit'

    def __init__( self : Optional[Any] , a : Optional[Any]=8_192 , a : Optional[Any]=768 , a : Dict=12 , a : List[str]=12 , a : Optional[int]=3_072 , a : int="gelu" , a : List[Any]=0.0 , a : Any=0.0 , a : Optional[Any]=0.0_2 , a : Optional[int]=1E-12 , a : List[Any]=224 , a : Union[str, Any]=16 , a : Optional[Any]=3 , a : List[str]=False , a : Optional[Any]=False , a : Any=False , a : Tuple=False , a : Optional[Any]=0.1 , a : Optional[Any]=0.1 , a : Optional[Any]=True , a : Dict=[3, 5, 7, 11] , a : str=[1, 2, 3, 6] , a : Optional[int]=True , a : List[str]=0.4 , a : int=256 , a : Any=1 , a : Optional[int]=False , a : Optional[Any]=255 , **a : List[str] , ):
        """Store BEiT model hyper-parameters on the configuration object."""
        super().__init__(**a )
        lowerCAmelCase__ : Union[str, Any] = vocab_size
        lowerCAmelCase__ : Optional[Any] = hidden_size
        lowerCAmelCase__ : List[Any] = num_hidden_layers
        lowerCAmelCase__ : str = num_attention_heads
        lowerCAmelCase__ : List[str] = intermediate_size
        lowerCAmelCase__ : Tuple = hidden_act
        lowerCAmelCase__ : int = hidden_dropout_prob
        lowerCAmelCase__ : str = attention_probs_dropout_prob
        lowerCAmelCase__ : Optional[Any] = initializer_range
        lowerCAmelCase__ : List[Any] = layer_norm_eps
        lowerCAmelCase__ : int = image_size
        lowerCAmelCase__ : Dict = patch_size
        lowerCAmelCase__ : Union[str, Any] = num_channels
        lowerCAmelCase__ : Optional[int] = use_mask_token
        lowerCAmelCase__ : Any = use_absolute_position_embeddings
        lowerCAmelCase__ : List[str] = use_relative_position_bias
        lowerCAmelCase__ : Any = use_shared_relative_position_bias
        lowerCAmelCase__ : int = layer_scale_init_value
        lowerCAmelCase__ : Optional[int] = drop_path_rate
        lowerCAmelCase__ : Any = use_mean_pooling
        # decode head attributes (semantic segmentation)
        lowerCAmelCase__ : Any = out_indices
        lowerCAmelCase__ : List[str] = pool_scales
        # auxiliary head attributes (semantic segmentation)
        lowerCAmelCase__ : Optional[int] = use_auxiliary_head
        lowerCAmelCase__ : str = auxiliary_loss_weight
        lowerCAmelCase__ : int = auxiliary_channels
        lowerCAmelCase__ : Any = auxiliary_num_convs
        lowerCAmelCase__ : Tuple = auxiliary_concat_input
        lowerCAmelCase__ : List[str] = semantic_loss_ignore_index
class A__ ( __magic_name__ ):
    """ONNX export configuration for BEiT.

    NOTE(review): both properties share the name `_lowerCamelCase` (the second
    shadows the first) — renaming damage inherited from upstream.
    """

    lowercase = version.parse('1.11' )

    @property
    def _lowerCamelCase ( self : Any ):
        """Model inputs: one 4-D `pixel_values` tensor with fully dynamic axes."""
        dynamic_axes = {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}
        return OrderedDict([('pixel_values', dynamic_axes)] )

    @property
    def _lowerCamelCase ( self : Optional[Any] ):
        """Absolute tolerance used when validating exported model outputs."""
        return 1E-4
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=__magic_name__ )
class A__ ( __magic_name__ ):
    """Task template describing an audio-classification dataset.

    NOTE(review): both methods below share the name `_lowerCamelCase`
    (renaming damage), so only the trailing property is reachable by that name.
    """

    # Fix: the five fields were all declared under the single name `lowercase`
    # while the method bodies read `self.label_schema`, `self.audio_column`
    # and `self.label_column`; restore distinct field names.
    task: str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'audio': Audio()} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"

    def _lowerCamelCase ( self : Dict , a : Tuple ):
        """Return a copy of this template whose label schema matches the dataset features `a`.

        Raises:
            ValueError: if the label column is missing or not a ClassLabel.
        """
        # Fix: the body previously read the undefined name `features`; the
        # features mapping is the parameter `a`.
        if self.label_column not in a:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(a[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = a[self.label_column]
        # Fix: the updated schema was previously assigned to a discarded local;
        # the dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__['label_schema'] = label_schema
        return task_template

    @property
    def _lowerCamelCase ( self : List[str] ):
        """Mapping from dataset column names to the template's canonical names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase__ ( lowerCamelCase_ ):
    # NOTE(review): every `__init__` parameter is named `__SCREAMING_SNAKE_CASE`
    # (duplicate parameter names are a SyntaxError) while the body reads `field`
    # and `path_or_paths` — mechanical renaming damage. Preserved byte-for-byte;
    # comments only.
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ):
        """Reader that loads a JSON / JSON-Lines source into a `datasets.Dataset`."""
        super().__init__(
            __SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , num_proc=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        lowercase_ : Tuple = field
        # Normalize a single path into a {split: path} mapping.
        lowercase_ : Any = path_or_paths if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths}
        lowercase_ : Dict = Json(
            cache_dir=__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , field=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )

    def _snake_case ( self ):
        """Materialize the dataset, either streaming or fully prepared on disk."""
        if self.streaming:
            lowercase_ : Dict = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            lowercase_ : int = None
            lowercase_ : Dict = None
            lowercase_ : Any = None
            lowercase_ : List[str] = None
            self.builder.download_and_prepare(
                download_config=__SCREAMING_SNAKE_CASE , download_mode=__SCREAMING_SNAKE_CASE , verification_mode=__SCREAMING_SNAKE_CASE , base_path=__SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
            lowercase_ : Dict = self.builder.as_dataset(
                split=self.split , verification_mode=__SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
        return dataset
class lowerCAmelCase__ :
    """Serializes a ``Dataset`` to JSON (JSON Lines by default), optionally
    writing batches in parallel with a multiprocessing pool."""

    def __init__(
        self,
        dataset,
        path_or_buf,
        batch_size=None,
        num_proc=None,
        **to_json_kwargs,
    ):
        """Store the dataset, destination and serialization options.

        Fix: the original bound every argument to a throwaway local, so the
        ``self.dataset`` / ``self.batch_size`` / ... reads in the methods
        below could never work.
        """
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        """Serialize the dataset to ``self.path_or_buf``.

        Returns:
            int: number of bytes written.

        Fix: all three methods of this class shared one obfuscated name, so
        only the last definition survived; names are restored from the
        ``self._write`` / ``self._batch_json`` call sites below.
        """
        # Discard a stray `path_or_buf` kwarg so it is not forwarded to pandas.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON Lines only makes sense for the "records" orient.
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        """Encode one batch of rows as JSON bytes (picklable pool worker)."""
        # Fix: the original unpacked all five tuple items into one name.
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        """Write all batches to ``file_obj``; returns total bytes written."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
| 264 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
_lowercase : Any = logging.get_logger(__name__)
# Map of pretrained GPT-NeoX checkpoints to their hosted config files.
# NOTE(review): this rebinds `_lowercase`, shadowing the logger created just
# above — the two module constants should have distinct names.
_lowercase : Dict = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
    """Configuration for GPT-NeoX models (defaults match EleutherAI/gpt-neox-20b)."""

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        """Store all hyperparameters on the instance.

        Fix: the original signature repeated one placeholder parameter name
        (a SyntaxError) and bound every value to a throwaway local, leaving
        `self.rope_scaling` / `self.hidden_size` below undefined.
        """
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        # Fix: error message previously misspelled "divisible".
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: {"type": linear|dynamic, "factor": float > 1}.

        Fix: the error message previously referred to a `name` field while the
        code reads `type`, and contained a duplicated "with with".
        """
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 264 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch/cuDNN ops deterministic so the image-slice checks below are reproducible.
enable_full_determinism()
class _UpperCAmelCase ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
    """Fast tests for `StableDiffusionInpaintPipeline` built on tiny dummy components.

    Fix: the original bound all class attributes to one repeated obfuscated
    name (only the last survived) and referenced the undefined `__snake_case`
    in several call sites; attribute names follow the pipeline tester mixin
    contract and method names are grounded by the `self.get_dummy_components()`
    / `self.get_dummy_inputs()` call sites below.
    """

    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build tiny deterministic components so the pipeline runs in seconds."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        """End-to-end run with dummy components; checks shape and a pixel slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase):
    """Slow GPU integration tests against the real
    stabilityai/stable-diffusion-2-inpainting checkpoint.

    NOTE(review): this class reuses the name of the fast test class above and
    shadows it at module level; the two classes should get distinct names.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        """fp32 inpainting against a reference output image."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        """fp16 variant; the tolerance is looser than fp32."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        """Check peak VRAM stays under budget with sequential CPU offload."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator,
            num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 264 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
# Map of pretrained MobileNetV2 checkpoints to their hosted config files.
# NOTE(review): this rebinds `_SCREAMING_SNAKE_CASE`, shadowing the logger
# bound just above — the two module constants should have distinct names.
_SCREAMING_SNAKE_CASE : Optional[Any] = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class A__ ( snake_case__ ):
    """Configuration for MobileNetV2 models."""

    # PretrainedConfig model identifier (was bound to an obfuscated name).
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """Store all hyperparameters on the instance.

        Fix: the original signature repeated one placeholder parameter name
        (a SyntaxError) and bound every value to a throwaway local instead of
        setting instance attributes.
        """
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A__ ( snake_case__ ):
    """ONNX export configuration for MobileNetV2.

    Fix: all three properties shared one obfuscated name, so only the last
    definition was accessible; names follow the ``OnnxConfig`` API
    (``inputs`` / ``outputs`` / ``atol_for_validation``).
    """

    # Minimum torch version supporting this export (was an obfuscated name).
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        # Only pixel values, with a dynamic batch axis.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        # Absolute tolerance used when validating the exported model.
        return 1e-4
| 127 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Fast tests for `IFInpaintingPipeline`.

    Fix: all class attributes and methods shared one obfuscated name each
    (only the last survived), `get_dummy_inputs` repeated a parameter name
    (a SyntaxError), and method bodies referenced the undefined `a_`;
    names follow the pipeline tester mixin contract.
    """

    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Shared dummy components come from IFPipelineTesterMixin.
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Requires safety checker to be in float16 as well.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 359 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase_ ( product = "laptop" ):
    """Scrape Amazon.in search results for ``product`` and return a DataFrame
    with title, link, current price, rating, MRP and discount percentage.

    Fix: the parameter was bound under an obfuscated name while the body read
    ``product``; every intermediate value was bound to the throwaway local
    ``A`` while later code read the real names (``product_title`` etc.); and
    ``item.ha`` is the digit-mangled form of ``item.h2``.
    """
    # NOTE(review): the path segment 'laptop' is hard-coded even though the
    # query string is parameterized — confirm whether the path should also
    # use `product`.
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # NOTE(review): the two statements below restore the apparent intent of
        # two lines that the obfuscation reduced to dead `' '` assignments —
        # blanking out rows where the listed price exceeds the MRP. Confirm
        # against the original script.
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"], "MRP of the product"
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Current Price of the product",
        ] = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # Fix: the original bound the query string to a throwaway name and then
    # referenced the undefined `product` and `get_amazon_product_data`;
    # the scraper in this file is defined as `lowerCAmelCase_`.
    product = "headphones"
    lowerCAmelCase_(product).to_csv(f"Amazon Product Data for {product}.csv")
| 311 | 0 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCAmelCase = logging.get_logger(__name__)
class __magic_name__ ( _UpperCamelCase ):
    """Feature extractor that turns raw audio into padded log-mel spectrogram
    patches (``audio_values``) plus a per-patch attention mask (``audio_mask``).

    Fix: the original ``__init__``/``__call__`` signatures repeated one
    placeholder parameter name (a SyntaxError) and bound every value to a
    throwaway local instead of instance attributes.
    """

    # Names of the tensors this extractor produces (was an obfuscated binding).
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute a normalized log-mel spectrogram for one waveform."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        # Shift/scale the dB values into [-1, 1].
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms.

        ``resample`` and ``mask_audio`` are accepted for API compatibility but
        are not referenced in this body.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            # NOTE(review): the write-back target was lost in obfuscation; this
            # restores the conventional left-aligned copy into the padded buffer.
            padded_audio_features[i, 0, : feature.shape[0]] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the LiLT model package.
# Fix: the import structure was bound to an obfuscated name (and then
# overwritten), while `_LazyModule` below reads `_import_structure`; the
# `sys.modules` assignment that makes the module lazy was also lost.
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is present.
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class _UpperCAmelCase ( _a ):
    """Configuration for BertGeneration encoder/decoder models."""

    # PretrainedConfig model identifier (was bound to an obfuscated name).
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        """Store all hyperparameters on the instance.

        Fix: the original signature repeated one placeholder parameter name
        (a SyntaxError) and bound every value to a throwaway local instead of
        setting instance attributes.
        """
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 368 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : str , __UpperCAmelCase : List[Any] ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
_A = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = "sgugger/tiny-distilbert-classification"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , only_pretrain_model=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = "patrickvonplaten/t5-tiny-random"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , configs=[config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCAmelCase , save_to_csv=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCAmelCase , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(__UpperCAmelCase , "inf_mem.csv" ) , env_info_csv_file=os.path.join(__UpperCAmelCase , "env.csv" ) , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "env.csv" ) ).exists() )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__UpperCAmelCase : Any ):
self.assertTrue(hasattr(__UpperCAmelCase , "sequential" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "cumulative" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "current" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCAmelCase , "log.txt" ) , log_print=__UpperCAmelCase , trace_memory_line_by_line=__UpperCAmelCase , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "log.txt" ) ).exists() )
| 174 | 0 |
import os
import numpy
import onnx
def lowerCAmelCase__(a, b) -> bool:
    """Compare two ONNX TensorProtos for equality while ignoring their names.

    The ``name`` fields are temporarily blanked so that two initializers with
    identical data but different names compare equal; the original names are
    restored before returning, so the caller's objects are left unchanged.

    Args:
        a: first tensor proto (any object with a mutable ``name`` and ``==``).
        b: second tensor proto.

    Returns:
        bool: True if the protos are equal apart from their names.
    """
    name_a = a.name
    name_b = b.name
    # Blank the names so the equality check ignores them.
    a.name = ""
    b.name = ""
    res = a == b
    # Restore the original state of the arguments.
    a.name = name_a
    b.name = name_b
    return res
def lowerCAmelCase__(node_proto, name, new_name):
    """Replace every input of *node_proto* named *name* with *new_name*, in place.

    The insert-then-pop sequence swaps the entry while keeping its position in
    the input list. Control-flow nodes carry subgraphs in their attributes, so
    those are rewritten recursively: ``If`` has two branch subgraphs, ``Loop``
    has one body subgraph.

    Args:
        node_proto: ONNX NodeProto whose inputs are rewritten in place.
        name: input name to replace.
        new_name: replacement input name.
    """
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            # Swap in place: insert the new name at i, then drop the old
            # entry that shifted to i + 1.
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def lowerCAmelCase__(graph_proto, name, new_name):
    """Rewrite every node of *graph_proto* so inputs named *name* become *new_name*.

    Args:
        graph_proto: ONNX GraphProto modified in place.
        name: input name to replace across all nodes.
        new_name: replacement input name.
    """
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def lowerCAmelCase__(model, model_without_ext, ind_to_replace):
    """Drop duplicate initializers from *model_without_ext* and patch references.

    For every ``(i, ref_i)`` pair in *ind_to_replace*, the initializer at index
    ``i`` is removed from *model_without_ext* and every node input that
    referenced it is rewritten to point at the surviving initializer at index
    ``ref_i``. *model* is only read, to sanity-check that the two models'
    initializer names line up.

    Args:
        model: original ONNX model (not modified).
        model_without_ext: ONNX model modified in place.
        ind_to_replace: iterable of ``(duplicate_index, kept_index)`` pairs,
            each with ``duplicate_index > kept_index``.
    """
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        # The duplicate must come after the kept initializer.
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # Redirect every reference to the removed initializer.
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def lowerCAmelCase__(onnx_file_path):
    """Deduplicate identical initializers in an ONNX model file.

    Loads the model, finds initializers whose tensor data is identical (names
    ignored), removes the duplicates, rewires all references to the surviving
    copy, and saves the result next to the input file with an ``optimized_``
    prefix.

    Args:
        onnx_file_path: path to the ``.onnx`` file to optimize.

    Returns:
        str: path of the optimized model file that was written.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    # Pairwise scan: O(n^2) over initializers, skipping anything already
    # known to be a duplicate.
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                # Convert element count to bytes. Codes follow the
                # onnx.TensorProto.DataType enum:
                # 1 = FLOAT, 6 = INT32, 7 = INT64, 11 = DOUBLE.
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                # Remove j, keep i (j > i, as required downstream).
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
| 68 |
import datasets
from .evaluate import evaluate
lowerCAmelCase__ = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
lowerCAmelCase__ = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
lowerCAmelCase__ = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a__(datasets.Metric):
    """CUAD question-answering metric (EM, F1, AUPR, P@80%Recall, P@90%Recall).

    Wraps the official CUAD v1 scoring script (`evaluate`). The hook methods
    are named `_info` / `_compute` as required by the `datasets.Metric` base
    class (the previous revision bound both to the same name, clobbering one).
    """

    def _info(self):
        """Declare the metric's citation, description and input feature schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        """Score *predictions* against *references* with the official CUAD script.

        Both inputs are reshaped into the CUAD/SQuAD dataset layout expected
        by `evaluate` before scoring.

        Returns:
            dict with keys 'exact_match', 'f1', 'aupr', 'prec_at_80_recall',
            'prec_at_90_recall'.
        """
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 68 | 1 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
# Module-level logger; every function below logs through this name.
logger = logging.getLogger(__name__)
def lowerCAmelCase(model, dirpath):
    """Save *model* into *dirpath* via ``save_pretrained``, clearing stale files.

    If *dirpath* already exists, any previous ``config.json`` and
    ``pytorch_model.bin`` inside it are deleted first so the new checkpoint
    does not mix with an old one; otherwise the directory is created.

    Args:
        model: a model exposing ``save_pretrained(directory)``.
        dirpath: target directory for the checkpoint.
    """
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def lowerCAmelCase(p, unlogit=False):
    """Return ``-sum(p * log p)`` along the last dimension.

    Args:
        p: tensor of probabilities (or raw attention weights when *unlogit*
            is set).
        unlogit: when True, square ``p`` first before computing the entropy.

    Returns:
        Tensor with the last dimension reduced.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # 0 * log(0) evaluates to NaN in floating point; define it as 0,
    # following the mathematical convention for entropy.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def lowerCAmelCase(tensor):
    """Log a 2D (layers x heads) tensor row by row through the module logger.

    Integer (``torch.long``) tensors are printed with ``%d`` formatting,
    everything else with five decimal places. The header line enumerates
    1-based indices up to ``len(tensor)`` (the number of rows, as in the
    original implementation).

    Args:
        tensor: 2D tensor; each row is logged as one tab-separated line.
    """
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def lowerCAmelCase(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run *model* over *eval_dataloader* and gather per-head statistics.

    For every attention head this accumulates (optionally) the attention
    entropy and an importance score — the absolute gradient of the LM loss
    w.r.t. the head mask — both normalized by the total token count, then
    logs the resulting matrices and the head ranking.

    Args:
        args: namespace with ``device``, ``local_rank`` and the
            ``dont_normalize_*`` flags.
        model: LM whose outputs are (loss, logits, ..., attentions).
        eval_dataloader: dataloader yielding single-tensor batches of ids.
        compute_entropy: accumulate attention entropies when True.
        compute_importance: accumulate head-importance scores when True.
        head_mask: optional starting mask; defaults to all-ones.
        actually_pruned: when the model was already physically pruned, drop
            the mask to avoid a shape mismatch.

    Returns:
        (attn_entropy, head_importance, total_loss)
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens

    # Layerwise importance normalization (L2 norm per layer)
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    # Scatter ranks so the most important head gets rank 0.
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def lowerCAmelCase(args, model, eval_dataloader):
    """Iteratively zero out the least important heads until quality drops.

    Starting from an all-ones mask, repeatedly masks ``masking_amount`` of the
    heads with the lowest importance, re-evaluating after each step, until the
    score (1 / LM loss) falls below ``masking_threshold`` times the original
    score. The last mask that still met the threshold is saved to
    ``head_mask.npy`` in ``args.output_dir`` and returned.

    Args:
        args: namespace with ``masking_threshold``, ``masking_amount`` and
            ``output_dir``.
        model: the language model under study.
        eval_dataloader: evaluation data.

    Returns:
        Tensor head mask of shape (layers, heads), 0.0 marking masked heads.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def lowerCAmelCase(args, model, eval_dataloader, head_mask):
    """Physically prune the heads zeroed in *head_mask* and report the speedup.

    Unlike masking, pruning removes the corresponding weights from the model.
    The model is scored before (masked) and after (pruned), the head counts
    are checked for consistency, and the pruned model is saved to
    ``args.output_dir``.

    Args:
        args: namespace with ``output_dir`` among others.
        model: the language model, modified in place by pruning.
        eval_dataloader: evaluation data.
        head_mask: (layers, heads) mask with 0.0 marking heads to prune.
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # A single pruned head comes out of squeeze() as a bare int.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def lowerCAmelCase():
    """Entry point: parse CLI args, set up device/distributed mode, load the
    GPT-2 model, compute head importance scores, and optionally mask then
    physically prune the least important attention heads.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
# Script entry point.
# NOTE(review): `main` is not bound under that name above — the entry function
# is currently defined as `lowerCAmelCase`. Confirm the intended binding
# before running this file as a script.
if __name__ == "__main__":
    main()
| 354 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants for the GLUE/MRPC fine-tuning example.
# NOTE(review): reconstructed names — the two assignments previously clobbered
# a single name; verify these against the callers outside this chunk.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def lowerCAmelCase(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build train/eval dataloaders for GLUE MRPC tokenized with *model_name*.

    Args:
        accelerator: `accelerate.Accelerator`; its distributed type decides
            the padding strategy (fixed-length on TPU, pad-to-longest
            otherwise).
        batch_size: per-device batch size used for both loaders.
            NOTE(review): the evaluation loader may originally have used a
            separate module-level constant — confirm against callers.
        model_name: tokenizer checkpoint name.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def lowerCAmelCase(accelerator, model, eval_dataloader, metric):
    """Evaluate *model* on *eval_dataloader* and return the computed accuracy.

    Gathers predictions/labels across processes and, in distributed runs,
    trims the duplicated samples that pad out the final batch.

    NOTE(review): parameter order is reconstructed from the body (the previous
    revision's signature was corrupted) — confirm against the callers.

    Args:
        accelerator: `accelerate.Accelerator` used for device placement and
            gathering tensors across processes.
        model: classification model called with the batch as keyword args.
        eval_dataloader: dataloader of batches containing a "labels" entry.
        metric: metric object exposing ``add_batch`` and ``compute``.

    Returns:
        The "accuracy" entry of the computed metric.
    """
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    """Train BERT-on-MRPC with Accelerate, saving a checkpoint every epoch.

    When ``args.resume_from_checkpoint`` is set, the checkpoint is reloaded
    and the recorded accuracy / learning rates / epoch are asserted against
    the matching ``state_<epoch>.json`` file, then the function returns
    without further training.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
        args: parsed CLI namespace from ``main()``.

    Note: restored from machine-mangled code (duplicate parameter names and
    placeholder assignment targets); names are taken from the in-body
    references and the call site.
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer; DeepSpeed may supply its own optimizer via its config.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler; DeepSpeed may supply its own scheduler via its config.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        # Recover the epoch number from the checkpoint folder name ("...epoch_<n>").
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        # Checkpoint and record the metrics for this epoch.
        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    """Parse command-line arguments and launch the checkpointing test run."""
    # Original description was copy-pasted from the peak-GPU-memory script;
    # this script exercises checkpoint saving and resumption.
    parser = argparse.ArgumentParser(description="Simple example of a training script testing checkpoint resumption.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput):
    """Output of the Flax conditional UNet forward pass.

    Attributes:
        sample: the predicted noise/sample tensor, NHW-transposed back to
            ``(batch, channels, height, width)`` by ``__call__``.

    Restored from mangled code: the class was declared as ``snake_case``
    with an undefined base ``A__``, yet is referenced below as
    ``FlaxUNetaDConditionOutput``; ``BaseOutput`` is the imported base.
    """

    sample: jnp.ndarray


# Backward-compatible alias for the mangled class name.
snake_case = FlaxUNetaDConditionOutput
@flax_register_to_config
class FlaxUNetaDConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """Conditional 2D UNet implemented as a Flax ``nn.Module``.

    Conv stem -> timestep embedding -> (cross-attention) down blocks ->
    mid block -> up blocks -> group-norm + SiLU + conv head.

    Restored from mangled code: all 17 config fields were declared under a
    single repeated name (so they clobbered each other), and both methods
    below were named identically — Flax requires the second to be named
    ``setup`` for the module to build at all. Field names/defaults are
    recovered from the ``self.*`` references in the method bodies.
    """

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        """Initialize and return the model parameters from dummy inputs."""
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        """Build all sub-modules (stem, time embedding, down/mid/up blocks, head)."""
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )

            down_blocks.append(down_block)
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    prev_output_channel=prev_output_channel,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )

            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ):
        """Run the UNet forward pass.

        Returns a ``FlaxUNetaDConditionOutput`` (or a 1-tuple when
        ``return_dict`` is False) holding the predicted sample.
        """
        # 1. time — accept python scalars and 0-d arrays for `timesteps`
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process — Flax convs are channels-last, so transpose NCHW -> NHWC
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            # ControlNet-style residuals are added to each skip connection.
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up — consume the skip connections in reverse order
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlockaD):
                sample = up_block(
                    sample,
                    temb=t_emb,
                    encoder_hidden_states=encoder_hidden_states,
                    res_hidden_states_tuple=res_samples,
                    deterministic=not train,
                )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)

        # 6. post-process — back to NCHW
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))

        if not return_dict:
            return (sample,)

        return FlaxUNetaDConditionOutput(sample=sample)
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    """Builds tiny ConvNeXt-V2 configs and random inputs for the unit tests.

    Restored from mangled code: the class was named ``A_`` but the test
    class below instantiates ``ConvNextVaModelTester``; the ``__init__``
    declared every parameter under the same name (a SyntaxError), and all
    attribute assignments used a placeholder target while the bodies read
    the real names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # NOTE: the mutable list defaults are shared across instances;
        # acceptable here because the tester never mutates them.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, random pixel_values, optional labels)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a small ConvNextVaConfig from the stored hyper-parameters."""
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last hidden state shape."""
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check the classification head's logits shape."""
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check backbone feature maps/channels, with and without out_features."""
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) without labels."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        """Return (config, inputs_dict) including labels, for training tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Standard ConvNeXt-V2 model tests.

    Restored from mangled code: the five class attributes below were all
    declared under one repeated name (clobbering each other), the mixin
    bases were the undefined placeholder ``A__``, and several test methods
    shared a name. The mangled class name ``A_`` is kept (nothing in this
    file references it).
    """

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        """Create the shared model/config testers."""
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        """Exercise the common config serialization round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='ConvNextV2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ConvNextV2 does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='ConvNextV2 does not use feedforward chunking')
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        """One backward pass per trainable model class."""
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # Base/backbone mappings have no loss head; skip them.
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        """Same as test_training but with gradient checkpointing enabled."""
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        """forward() must accept pixel_values as its first argument."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        """Hidden states must be emitted per stage with the expected shape."""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading the first published checkpoint."""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats image used by the integration tests.

    Fix: the mangled version assigned the opened image to a placeholder
    variable but returned the undefined name ``image`` (NameError); the
    integration test below calls this as ``prepare_img()``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


# Backward-compatible alias for the mangled function name.
lowerCAmelCase_ = prepare_img
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
    """Slow integration test: run pretrained ConvNeXt-V2 on a real COCO image.

    Restored from mangled code: both methods were named identically, so the
    cached property was clobbered and ``self.default_image_processor``
    (referenced below) could never resolve; locals used placeholder targets
    while the bodies read the real names; trailing dataset junk was fused
    onto the final assertion line.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor matching the checkpoint (None without vision deps)."""
        return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """End-to-end forward pass; checks the logits shape and first values."""
        model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors='pt' ).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
def solution(limit: int = 1000) -> int:
    """Project Euler problem 1: sum the natural numbers below ``limit``
    that are multiples of 3 or 5.

    Fix: the ``__main__`` guard calls ``solution()`` but the function was
    only bound to a mangled name, so running the script raised NameError.
    The mangled name is kept as an alias for backward compatibility.

    >>> solution(10)
    23
    """
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


# Backward-compatible alias for the mangled name.
SCREAMING_SNAKE_CASE__ = solution

if __name__ == "__main__":
    print(f'''{solution() = }''')
| 368 | """simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
# Fix: both the logger and the archive map were bound to the same mangled
# name `__A`, so the logger reference was clobbered by the dict. Distinct
# names restored; the old name is aliased to its final (dict) value.
logger = logging.get_logger(__name__)

# Canonical BLOOM checkpoints mapped to their hosted config files.
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}

# Backward-compatible alias for the mangled module name.
__A = BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP
class UpperCAmelCase (PretrainedConfig ):
    """Configuration for BLOOM models (upstream name: ``BloomConfig``).

    Restored from mangled code: the base class was the undefined placeholder
    ``_UpperCAmelCase`` (should be the imported ``PretrainedConfig``), and
    ``model_type`` / ``keys_to_ignore_at_inference`` / ``attribute_map`` —
    names the ``PretrainedConfig`` machinery dispatches on — were all
    declared under one repeated placeholder. The mangled class name is kept
    for compatibility.
    """

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''', None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class UpperCAmelCase (_UpperCAmelCase ):
    """ONNX export configuration for BLOOM (supports past key/values).

    Fixes from review: the anonymized version gave several methods the same
    name ``_snake_case`` (so earlier properties were shadowed) and declared
    duplicate parameter names (a SyntaxError), while method bodies reference
    the original names (``self.num_attention_heads``, ``self.num_layers``).
    Names restored accordingly.
    """

    # Minimum torch version needed for a faithful export of this model.
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(self, config, task="default", patching_specs=None, use_past=False):
        """Wrap a model ``config`` for ONNX export of the given ``task``."""
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self):
        """Ordered mapping of model input names to their dynamic axes."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self):
        return self._config.n_layer

    @property
    def num_attention_heads(self):
        return self._config.n_head

    @property
    def atol_for_validation(self):
        # Absolute tolerance used when validating the exported model.
        return 1e-3

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        """Build dummy inputs (including past key/values when ``use_past``)."""
        common_inputs = super(UpperCAmelCase, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
| 2 | 0 |
import functools
from typing import Any


def lowercase_(string, words):
    """Return True if `string` can be segmented into words from `words`.

    Fixes from review: the anonymized version assigned every local to one
    throwaway name while later lines referenced the original names
    (`trie`, `trie_node`, ...), and both parameters shared one name
    (a SyntaxError).  Logic restored: build a trie of the dictionary words,
    then memoized DP over suffixes.

    Raises:
        ValueError: if `string` is empty/not a str, or `words` is not a
            list of non-empty strings.
    """
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        # Mark the end of a complete dictionary word.
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        """Can string[index:] be segmented into dictionary words?"""
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
| 283 |
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class UpperCAmelCase_ ( UpperCamelCase ):
    """Processor wrapping a BridgeTower image processor and a Roberta tokenizer.

    Fixes from review: the anonymized version collapsed the three
    ProcessorMixin class attributes into one name, declared every
    ``__call__`` parameter with one shared name (a SyntaxError), and lost
    the API method names.  Restored to the standard processor contract.
    """

    # Consumed by ProcessorMixin to instantiate the two sub-processors.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        """Store both sub-processors on the instance (via ProcessorMixin)."""
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize ``text`` and preprocess ``images`` into one encoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Combined, de-duplicated input names of tokenizer + image processor."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 283 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
lowercase__: Tuple = []
lowercase__: Dict = 1
while len(__UpperCAmelCase ) < 1e6:
constant.append(str(__UpperCAmelCase ) )
i += 1
lowercase__: str = ''''''.join(__UpperCAmelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[9_9] )
* int(constant[9_9_9] )
* int(constant[9_9_9_9] )
* int(constant[9_9_9_9_9] )
* int(constant[9_9_9_9_9_9] )
)
if __name__ == "__main__":
print(solution())
| 350 | """simple docstring"""
import warnings

from diffusers import StableDiffusionImgaImgPipeline # noqa F401

# Deprecation shim: importing this script only emits a warning pointing users
# at the supported pipeline import from `diffusers`.
warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
| 2 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class UpperCamelCase__ ( __lowercase ):
    """Output of a single Karras–Ve scheduler step.

    Fix from review: all three fields were anonymized to one shared name,
    collapsing them into a single dataclass field.  The field names are
    restored from the keyword arguments used to construct this class later
    in the file (``prev_sample=``, ``derivative=``, ``pred_original_sample=``).
    """

    # Sample for the previous timestep (the scheduler's step result).
    prev_sample: torch.FloatTensor
    # Derivative estimate used by the follow-up correction step.
    derivative: torch.FloatTensor
    # Model's estimate of the fully denoised sample, when available.
    pred_original_sample: Optional[torch.FloatTensor] = None
class UpperCamelCase__ ( __lowercase ,__lowercase ):
    """Stochastic Karras–Ve style scheduler (churn + Euler step + correction).

    NOTE(review): anonymization damage in this class — ``__init__`` and most
    methods declare every parameter with the same name (a SyntaxError), and
    locals are all assigned to ``__a`` while later statements reference the
    original variable names (e.g. ``sigma_max``, ``num_inference_steps``,
    ``sample_hat``).  Code left byte-identical; only comments added.
    """

    _SCREAMING_SNAKE_CASE : Union[str, Any] = 2

    @register_to_config
    def __init__(self : Optional[int] , snake_case_ : float = 0.02 , snake_case_ : float = 1_0_0 , snake_case_ : float = 1.007 , snake_case_ : float = 8_0 , snake_case_ : float = 0.05 , snake_case_ : float = 5_0 , ):
        # standard deviation of the initial noise distribution
        __a : List[str] = sigma_max

        # setable values
        __a : int = None
        __a : np.IntTensor = None
        __a : torch.FloatTensor = None  # sigma(t_i)

    def lowerCAmelCase (self : int , snake_case_ : torch.FloatTensor , snake_case_ : Optional[int] = None ):
        # NOTE(review): returns `sample`, which is not one of the (anonymized)
        # parameter names — broken reference in the anonymized source.
        return sample

    def lowerCAmelCase (self : int , snake_case_ : int , snake_case_ : Union[str, torch.device] = None ):
        # Precompute the descending timestep grid and the geometric sigma schedule.
        __a : Tuple = num_inference_steps
        __a : Optional[int] = np.arange(0 , self.num_inference_steps )[::-1].copy()
        __a : str = torch.from_numpy(snake_case_ ).to(snake_case_ )
        __a : List[Any] = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        __a : Optional[Any] = torch.tensor(snake_case_ , dtype=torch.floataa , device=snake_case_ )

    def lowerCAmelCase (self : List[Any] , snake_case_ : torch.FloatTensor , snake_case_ : float , snake_case_ : Optional[torch.Generator] = None ):
        # Stochastically bump sigma to sigma_hat and add matching noise (eps)
        # when sigma lies inside the configured churn window.
        if self.config.s_min <= sigma <= self.config.s_max:
            __a : List[Any] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            __a : Optional[int] = 0

        # sample eps ~ N(0, S_noise^2 * I)
        __a : str = self.config.s_noise * randn_tensor(sample.shape , generator=snake_case_ ).to(sample.device )
        __a : int = sigma + gamma * sigma
        __a : Dict = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def lowerCAmelCase (self : Any , snake_case_ : torch.FloatTensor , snake_case_ : float , snake_case_ : float , snake_case_ : torch.FloatTensor , snake_case_ : bool = True , ):
        # First-order (Euler) step from sigma_hat down to sigma_prev.
        __a : Dict = sample_hat + sigma_hat * model_output
        __a : Optional[int] = (sample_hat - pred_original_sample) / sigma_hat
        __a : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=snake_case_ , derivative=snake_case_ , pred_original_sample=snake_case_ )

    def lowerCAmelCase (self : List[Any] , snake_case_ : torch.FloatTensor , snake_case_ : float , snake_case_ : float , snake_case_ : torch.FloatTensor , snake_case_ : torch.FloatTensor , snake_case_ : torch.FloatTensor , snake_case_ : bool = True , ):
        # Correction step: redo the update with the average of the two
        # derivative estimates (0.5 * derivative + 0.5 * derivative_corr).
        __a : Optional[Any] = sample_prev + sigma_prev * model_output
        __a : Union[str, Any] = (sample_prev - pred_original_sample) / sigma_prev
        __a : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=snake_case_ , derivative=snake_case_ , pred_original_sample=snake_case_ )

    def lowerCAmelCase (self : List[Any] , snake_case_ : Tuple , snake_case_ : str , snake_case_ : Tuple ):
        # Adding noise to an arbitrary sample is not supported by this scheduler.
        raise NotImplementedError()
| 216 |
def __UpperCamelCase(n_term: str) -> list:
    """Generate the Harmonic series as strings: ["1", "1/2", ..., "1/n"].

    Returns an empty list for the empty-string input.

    Fixes from review: the anonymized body referenced undefined names
    (`n_term`, `series`) instead of its parameter/local, and the __main__
    driver called names that were never defined.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # First term is "1"; subsequent terms are "1/k".
        series.append(f"1/{temp + 1}" if series else "1")
    return series


# Restore the name used by the __main__ driver (and kept for compatibility).
harmonic_series = __UpperCamelCase

if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 216 | 1 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class a ( a__ ):
    """Value-guided planning pipeline: samples trajectories with a diffusion
    UNet and nudges them along a value-function gradient at each timestep.

    NOTE(review): anonymization damage throughout — ``__init__``'s parameters
    all share one name (a SyntaxError), every local/attribute assignment goes
    to the throwaway name ``lowerCAmelCase`` while later statements read the
    original attribute names (``self.data``, ``self.means``, ``self.stds``,
    ...), and several methods share the name ``UpperCamelCase__`` although the
    bodies call them by their original names (``self.to_torch``,
    ``self.reset_xa``, ``self.run_diffusion``, ``self.normalize``,
    ``self.de_normalize``).  Code left byte-identical; only docs added.
    """

    def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , ):
        """Store value function, unet, scheduler and env; compute per-key
        mean/std normalization statistics from the env dataset."""
        super().__init__()
        lowerCAmelCase = value_function
        lowerCAmelCase = unet
        lowerCAmelCase = scheduler
        lowerCAmelCase = env
        lowerCAmelCase = env.get_dataset()
        lowerCAmelCase = {}
        for key in self.data.keys():
            try:
                lowerCAmelCase = self.data[key].mean()
            except: # noqa: E722
                # Non-numeric dataset entries simply get no statistics.
                pass
        lowerCAmelCase = {}
        for key in self.data.keys():
            try:
                lowerCAmelCase = self.data[key].std()
            except: # noqa: E722
                pass
        lowerCAmelCase = env.observation_space.shape[0]
        lowerCAmelCase = env.action_space.shape[0]

    def UpperCamelCase__ ( self , _snake_case , _snake_case ):
        """Normalize ``x_in`` with the stored mean/std for ``key``."""
        return (x_in - self.means[key]) / self.stds[key]

    def UpperCamelCase__ ( self , _snake_case , _snake_case ):
        """Invert the normalization for ``key``."""
        return x_in * self.stds[key] + self.means[key]

    def UpperCamelCase__ ( self , _snake_case ):
        """Recursively move ``x_in`` (tensor or dict) onto the unet device."""
        if type(_snake_case ) is dict:
            return {k: self.to_torch(_snake_case ) for k, v in x_in.items()}
        elif torch.is_tensor(_snake_case ):
            return x_in.to(self.unet.device )
        return torch.tensor(_snake_case , device=self.unet.device )

    def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case ):
        """Overwrite conditioned trajectory indices with the values in ``cond``."""
        for key, val in cond.items():
            lowerCAmelCase = val.clone()

        return x_in

    def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case ):
        """Run the guided reverse-diffusion loop over all scheduler timesteps."""
        lowerCAmelCase = x.shape[0]
        lowerCAmelCase = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            lowerCAmelCase = torch.full((batch_size,) , _snake_case , device=self.unet.device , dtype=torch.long )
            for _ in range(_snake_case ):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    lowerCAmelCase = self.value_function(x.permute(0 , 2 , 1 ) , _snake_case ).sample
                    lowerCAmelCase = torch.autograd.grad([y.sum()] , [x] )[0]

                    lowerCAmelCase = self.scheduler._get_variance(_snake_case )
                    lowerCAmelCase = torch.exp(0.5 * posterior_variance )
                    lowerCAmelCase = model_std * grad
                lowerCAmelCase = 0
                lowerCAmelCase = x.detach()
                lowerCAmelCase = x + scale * grad
                lowerCAmelCase = self.reset_xa(_snake_case , _snake_case , self.action_dim )
            lowerCAmelCase = self.unet(x.permute(0 , 2 , 1 ) , _snake_case ).sample.permute(0 , 2 , 1 )

            # TODO: verify deprecation of this kwarg
            lowerCAmelCase = self.scheduler.step(_snake_case , _snake_case , _snake_case , predict_epsilon=_snake_case )['prev_sample']

            # apply conditions to the trajectory (set the initial state)
            lowerCAmelCase = self.reset_xa(_snake_case , _snake_case , self.action_dim )
            lowerCAmelCase = self.to_torch(_snake_case )
        return x, y

    def __call__( self , _snake_case , _snake_case=64 , _snake_case=32 , _snake_case=2 , _snake_case=0.1 ):
        """Plan a trajectory for the given observation and return the
        denormalized actions, best-valued trajectory first."""
        # normalize the observation and replicate it across the batch
        lowerCAmelCase = self.normalize(_snake_case , 'observations' )
        lowerCAmelCase = obs[None].repeat(_snake_case , axis=0 )

        lowerCAmelCase = {0: self.to_torch(_snake_case )}
        lowerCAmelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        lowerCAmelCase = randn_tensor(_snake_case , device=self.unet.device )
        lowerCAmelCase = self.reset_xa(_snake_case , _snake_case , self.action_dim )
        lowerCAmelCase = self.to_torch(_snake_case )

        # run the diffusion process
        lowerCAmelCase ,lowerCAmelCase = self.run_diffusion(_snake_case , _snake_case , _snake_case , _snake_case )

        # sort output trajectories by value
        lowerCAmelCase = y.argsort(0 , descending=_snake_case ).squeeze()
        lowerCAmelCase = x[sorted_idx]
        lowerCAmelCase = sorted_values[:, :, : self.action_dim]
        lowerCAmelCase = actions.detach().cpu().numpy()
        lowerCAmelCase = self.de_normalize(_snake_case , key='actions' )

        # select the action with the highest value
        if y is not None:
            lowerCAmelCase = 0
        else:
            # if we didn't run value guiding, select a random action
            lowerCAmelCase = np.random.randint(0 , _snake_case )

        lowerCAmelCase = denorm_actions[selected_index, 0]
        return denorm_actions
| 309 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield paths of all .py/.ipynb files under `top_dir`, skipping
    __init__.py files and pruning 'scripts' and hidden/underscore directories.

    Fixes from review: the directory filter was assigned to a throwaway local
    instead of back into ``dir_names`` (so os.walk was never pruned), and
    ``splitext``/``join`` were called on the top-level directory instead of
    the per-file names.
    """
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Mutate in place so os.walk does not descend into pruned directories.
        dir_names[:] = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip('./')


# Preserve the previous (anonymized) name for backward compatibility.
_SCREAMING_SNAKE_CASE = good_file_paths
def md_prefix(i: int) -> str:
    """Markdown prefix for nesting depth `i`: an indented bullet when nested,
    a fresh H2 heading at depth 0."""
    return f"{i * ' '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """Print a heading for every directory component of `new_path` that
    differs from `old_path`, then return `new_path`.

    Fixes from review: the anonymized version had duplicate parameter names
    (a SyntaxError) and passed the wrong variable into ``md_prefix`` —
    the component index ``i`` is the nesting depth the prefix needs.
    """
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


# Preserve the previous (anonymized) name for backward compatibility.
_SCREAMING_SNAKE_CASE = print_path
def print_directory_md(top_dir: str = ".") -> None:
    """Print a markdown index (headings + links) of all good file paths
    under `top_dir`.

    Fixes from review: ``md_prefix`` was called with the top-level directory
    string instead of the computed indent (a TypeError at runtime), and the
    link text/URL used a literal "(unknown)" placeholder where the per-file
    name belongs.
    """
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            # Emit headings for any directories we just entered.
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


# Preserve the previous (anonymized) name for backward compatibility.
_SCREAMING_SNAKE_CASE = print_directory_md


if __name__ == "__main__":
    print_directory_md('.')
| 309 | 1 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__UpperCAmelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCamelCase (unittest.TestCase ):
    """Configuration holder used by the Pix2Struct image-processor tests.

    Fixes from review: the anonymized ``__init__`` declared every parameter
    with the same name (a SyntaxError) and assigned the values to throwaway
    locals instead of ``self``; the two helper methods also shared one
    (mangled) name, while the sibling test suites call them as
    ``prepare_image_processor_dict`` / ``prepare_dummy_image``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        """Record the test configuration; `size`/`patch_size` get defaults."""
        size = size if size is not None else {'height': 2_0, 'width': 2_0}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
        self.patch_size = patch_size if patch_size is not None else {'height': 1_6, 'width': 1_6}

    def prepare_image_processor_dict(self):
        """Kwargs used to build the image processor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Download the reference image used by the integration test."""
        img_url = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
        return raw_image


# Name used by the test suites below to instantiate this helper.
PixaStructImageProcessingTester = lowerCamelCase
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCamelCase (_snake_case , unittest.TestCase ):
    """Pix2Struct image-processor tests for 3-channel (RGB) inputs.

    NOTE(review): anonymization damage — all test methods share one mangled
    name (``__UpperCAmelCase``), results are assigned to throwaway locals
    (``UpperCAmelCase_``) while assertions read the original names
    (``inputs``, ``encoded_images``, ``expected_hidden_dim``), and the setUp
    references ``PixaStructImageProcessingTester``, which is not how the
    tester class is defined above.  Code left byte-identical; comments added.
    """

    _snake_case : Dict = PixaStructImageProcessor if is_vision_available() else None

    def __UpperCAmelCase ( self ) -> str:
        # setUp: build the shared tester/configuration object.
        UpperCAmelCase_ : Union[str, Any] = PixaStructImageProcessingTester(self )

    @property
    def __UpperCAmelCase ( self ) -> Any:
        # Kwargs used to instantiate the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def __UpperCAmelCase ( self ) -> List[Any]:
        # The processor must expose its two configuration flags.
        UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_convert_rgb' ) )

    def __UpperCAmelCase ( self ) -> int:
        # Integration check against a downloaded reference image.
        UpperCAmelCase_ : Union[str, Any] = self.image_processor_tester.prepare_dummy_image()
        UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        UpperCAmelCase_ : List[str] = 2_0_4_8
        UpperCAmelCase_ : List[str] = image_processor(_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06 ) , atol=1E-3 , rtol=1E-3 ) )

    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        # Initialize image_processor
        UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , Image.Image )

        # Test not batched input
        UpperCAmelCase_ : Dict = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCAmelCase_ : Dict = image_processor(
                image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            UpperCAmelCase_ : Optional[Any] = image_processor(
                _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def __UpperCAmelCase ( self ) -> Tuple:
        # Initialize image_processor
        UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , Image.Image )

        # Test not batched input
        UpperCAmelCase_ : List[str] = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2

        UpperCAmelCase_ : List[str] = True

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(_UpperCamelCase ):
                UpperCAmelCase_ : List[str] = image_processor(
                    image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches

            UpperCAmelCase_ : Any = 'Hello'
            UpperCAmelCase_ : Optional[int] = image_processor(
                image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            UpperCAmelCase_ : Any = image_processor(
                _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def __UpperCAmelCase ( self ) -> Any:
        # Initialize image_processor
        UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , np.ndarray )

        UpperCAmelCase_ : List[Any] = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCAmelCase_ : Optional[int] = image_processor(
                image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            UpperCAmelCase_ : int = image_processor(
                _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )

    def __UpperCAmelCase ( self ) -> int:
        # Initialize image_processor
        UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , torch.Tensor )

        # Test not batched input
        UpperCAmelCase_ : Any = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCAmelCase_ : List[str] = image_processor(
                image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            UpperCAmelCase_ : List[str] = image_processor(
                _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCamelCase (_snake_case , unittest.TestCase ):
    """Pix2Struct image-processor tests for 4-channel (RGBA) inputs, which the
    processor converts to RGB — hence ``num_channels - 1`` in the expected
    hidden dimension.

    NOTE(review): same anonymization damage as the RGB suite above (shared
    method names, throwaway local assignments, undefined-name references in
    the assertions).  Code left byte-identical; comments added.
    """

    _snake_case : List[str] = PixaStructImageProcessor if is_vision_available() else None

    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        # setUp: 4-channel tester, expecting 3 channels after RGB conversion.
        UpperCAmelCase_ : Optional[Any] = PixaStructImageProcessingTester(self , num_channels=4 )
        UpperCAmelCase_ : int = 3

    @property
    def __UpperCAmelCase ( self ) -> Union[str, Any]:
        return self.image_processor_tester.prepare_image_processor_dict()

    def __UpperCAmelCase ( self ) -> List[Any]:
        # The processor must expose its two configuration flags.
        UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_convert_rgb' ) )

    def __UpperCAmelCase ( self ) -> str:
        # Initialize image_processor
        UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , Image.Image )

        # Test not batched input
        UpperCAmelCase_ : List[str] = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            UpperCAmelCase_ : List[str] = image_processor(
                image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )

            # Test batched
            UpperCAmelCase_ : Any = image_processor(
                _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 29 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
    """Visual question answering pipeline: takes an image + question and
    returns the top-k answers with sigmoid scores.

    NOTE(review): anonymization damage — the four pipeline hook methods all
    share one mangled name (``__UpperCAmelCase``) so only the last survives,
    ``__init__``'s *args/**kwargs share one name (a SyntaxError), and method
    bodies assign to throwaway locals while reading the original names
    (``padding``, ``truncation``, ``top_k``, ``model_inputs``, ...).  Code
    left byte-identical; only comments added.
    """

    def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
        super().__init__(*_UpperCamelCase , **_UpperCamelCase )
        # Restrict to models registered for visual question answering.
        self.check_model_type(_UpperCamelCase )

    def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
        # _sanitize_parameters: split user kwargs into preprocess/postprocess dicts.
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
        if padding is not None:
            UpperCAmelCase_ : List[str] = padding
        if truncation is not None:
            UpperCAmelCase_ : Tuple = truncation
        if top_k is not None:
            UpperCAmelCase_ : Dict = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
        # Accept either (image, question) pairs or pre-built {'image', 'question'} dicts.
        if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
            UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
        else:
            UpperCAmelCase_ : List[str] = image
        UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
        return results

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
        # preprocess: load the image, tokenize the question, merge image features.
        UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
        UpperCAmelCase_ : Dict = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
        UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
        model_inputs.update(_UpperCamelCase )
        return model_inputs

    def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
        # _forward: run the model on the prepared inputs.
        UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
        return model_outputs

    def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
        # postprocess: top-k sigmoid scores mapped to label strings (PyTorch only).
        if top_k > self.model.config.num_labels:
            UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels

        if self.framework == "pt":
            UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
            UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )

        UpperCAmelCase_ : Optional[Any] = scores.tolist()
        UpperCAmelCase_ : Tuple = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29 | 1 |
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclidean greatest common divisor, implemented recursively.

    Fixes from review: both parameters shared one anonymized name
    (a SyntaxError) while the body referenced `a`/`b`, and the recursive
    call targets `greatest_common_divisor` — the name the Hill cipher class
    below also uses — so the function is defined under that name with the
    old anonymized name kept as an alias.
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)


# Preserve the previous (anonymized) name for backward compatibility.
lowerCAmelCase_ = greatest_common_divisor
class A :
__magic_name__ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
__magic_name__ = numpy.vectorize(lambda __snake_case : x % 36 )
__magic_name__ = numpy.vectorize(__snake_case )
def __init__( self , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
A : List[Any] = self.modulus(SCREAMING_SNAKE_CASE ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
A : Optional[Any] = encrypt_key.shape[0]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return self.key_string.index(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.key_string[round(SCREAMING_SNAKE_CASE )]
def __lowerCAmelCase ( self ) -> None:
"""simple docstring"""
A : Tuple = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
A : List[Any] = det % len(self.key_string )
A : List[str] = len(self.key_string )
if greatest_common_divisor(SCREAMING_SNAKE_CASE , len(self.key_string ) ) != 1:
A : Tuple = (
F'determinant modular {req_l} of encryption key({det}) '
F'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : List[Any] = [char for char in text.upper() if char in self.key_string]
A : str = chars[-1]
while len(SCREAMING_SNAKE_CASE ) % self.break_key != 0:
chars.append(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : Dict = self.process_text(text.upper() )
A : Union[str, Any] = ''''''
for i in range(0 , len(SCREAMING_SNAKE_CASE ) - self.break_key + 1 , self.break_key ):
A : Any = text[i : i + self.break_key]
A : int = [self.replace_letters(SCREAMING_SNAKE_CASE ) for char in batch]
A : Tuple = numpy.array([vec] ).T
A : Optional[int] = self.modulus(self.encrypt_key.dot(SCREAMING_SNAKE_CASE ) ).T.tolist()[
0
]
A : str = ''''''.join(
self.replace_digits(SCREAMING_SNAKE_CASE ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def make_decrypt_key(self) -> numpy.ndarray:
    """Return the inverse key matrix (mod 36) used for decryption.

    Name restored from the ``self.make_decrypt_key()`` call in ``decrypt``.
    """
    det = round(numpy.linalg.det(self.encrypt_key))
    if det < 0:
        det = det % len(self.key_string)
    det_inv = None
    # Brute-force the modular inverse of the determinant.
    for i in range(len(self.key_string)):
        if (det * i) % len(self.key_string) == 1:
            det_inv = i
            break
    inv_key = (
        det_inv
        * numpy.linalg.det(self.encrypt_key)
        * numpy.linalg.inv(self.encrypt_key)
    )
    return self.to_int(self.modulus(inv_key))
def decrypt(self, text: str) -> str:
    """Decrypt Hill-cipher ``text`` using the modular inverse of the key.

    Name restored from the ``hc.decrypt`` call in ``main``; mirrors ``encrypt``
    with the decryption key.
    """
    decrypt_key = self.make_decrypt_key()
    text = self.process_text(text.upper())
    decrypted = ""
    for i in range(0, len(text) - self.break_key + 1, self.break_key):
        batch = text[i : i + self.break_key]
        vec = [self.replace_letters(char) for char in batch]
        batch_vec = numpy.array([vec]).T
        batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
        decrypted_batch = "".join(
            self.replace_digits(num) for num in batch_decrypted
        )
        decrypted += decrypted_batch
    return decrypted
def main() -> None:
    """Interactive driver: read a key matrix, then encrypt or decrypt user text.

    Renamed from the mangled identifier to match the ``main()`` call in the
    ``__main__`` guard below; locals restored from their reads.
    """
    n = int(input('Enter the order of the encryption key: '))
    hill_matrix = []
    print('Enter each row of the encryption key with space separated integers')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print('Would you like to encrypt or decrypt some text? (1 or 2)')
    option = input('\n1. Encrypt\n2. Decrypt\n')
    if option == "1":
        text_e = input('What text would you like to encrypt?: ')
        print('Your encrypted text is:')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ')
        print('Your decrypted text is:')
        print(hc.decrypt(text_d))
if __name__ == "__main__":
    # Run the module's doctests first, then start the interactive CLI.
    import doctest

    doctest.testmod()
    main()
| 311 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger: the name is grounded by the ``logger.warning`` / ``logger.info``
# calls in the classes below; the original bound it to a throwaway identifier.
logger = logging.get_logger(__name__)

# Map of released checkpoints to their hosted config files.
# (Annotation removed: ``Tuple`` was never imported and would NameError at import.)
lowercase = {
    'google/pix2struct-textcaps-base': (
        'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
    ),
}
class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model.

    NOTE(review): the original class was mangled (name ``A``, undefined base
    ``__snake_case``, duplicate ``SCREAMING_SNAKE_CASE`` parameters — a
    SyntaxError). The class name is grounded by the ``PixaStructTextConfig``
    instantiation further down this module, the base by the ``PretrainedConfig``
    import at the top, and the parameter names by the attribute assignments.
    """

    model_type = 'pix2struct_text_model'
    # Cached keys PretrainedConfig should drop when serializing for inference.
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'hidden_size',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config, unwrapping the ``text_config`` of a full Pix2Struct config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type') == "pix2struct":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (encoder) model.

    NOTE(review): reconstructed from the mangled original (duplicate parameter
    names were a SyntaxError); class name grounded by the
    ``PixaStructVisionConfig`` instantiation later in this module, parameter
    names by the attribute assignments below.
    """

    model_type = 'pix2struct_vision_model'

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this config, unwrapping the ``vision_config`` of a full Pix2Struct config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type') == "pix2struct":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.'
            )
        return cls.from_dict(config_dict, **kwargs)
class PixaStructConfig(PretrainedConfig):
    """Composite Pix2Struct configuration holding a text and a vision sub-config.

    NOTE(review): name chosen to match the ``PixaStructTextConfig`` /
    ``PixaStructVisionConfig`` references already present in the original body;
    confirm nothing imports this class under the mangled name ``A``.
    """

    model_type = 'pix2struct'
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.')
        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)
        # Mirror the decoder's special-token ids at the top level.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        # Propagate the shared initializer range into both sub-configs.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
| 311 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCamelCase__ :
    """Shared save/load round-trip tests for feature-extractor test classes.

    Subclasses must set ``feature_extraction_class`` and provide a
    ``feat_extract_dict`` with constructor kwargs. NOTE(review): the four test
    methods were all mangled to the same name ``__A`` (only the last survived);
    names below follow the upstream transformers mixin — confirm before merge.
    """

    # Concrete test classes override this with the extractor under test.
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        """to_json_string round-trips every constructor kwarg."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        """Writing and re-reading a JSON file preserves the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        """save_pretrained emits well-formed JSON and from_pretrained restores it."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        """The extractor is constructible with all-default arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 5 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (6k±1 trial division).

    Renamed from the mangled duplicate ``_lowerCAmelCase`` to match the
    ``is_prime(...)`` call sites below; the body already read ``number``.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """Return ``n`` plus every left- and right-truncation of its decimal digits.

    Renamed to match the ``list_truncated_nums(...)`` call site below; the body
    read the unbound names ``str_num`` / ``list_nums`` / ``n``.
    """
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))   # drop i leading digits
        list_nums.append(int(str_num[:-i]))  # drop i trailing digits
    return list_nums
def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers longer than 3 digits, require a prime
    3-digit prefix and suffix before the full truncation check.

    Renamed to match the ``validate(...)`` call site below.
    """
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Return the first ``count`` truncatable primes, scanning odd numbers from 13.

    Renamed to match the ``compute_truncated_primes(...)`` call sites below.
    """
    list_truncated_primes = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def _lowerCAmelCase ( ) -> int:
    """Project Euler 37: sum of the first eleven truncatable primes."""
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
    # The "=" specifier prints both the expression text and its value.
    print(f'''{sum(compute_truncated_primes(1_1)) = }''')
| 47 | 0 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class a__ ( BaseTokenizer ):
    """SentencePiece-style Unigram tokenizer built on the `tokenizers` library.

    NOTE(review): reconstructed from a mangled original whose ``__init__`` had
    five parameters all named ``lowerCAmelCase`` (a SyntaxError). Base class
    grounded by the ``BaseTokenizer`` import above; ``add_unk_id`` grounded by
    the ``self.add_unk_id()`` calls in the train methods.
    """

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        # Fixed ids for the special tokens (pad=0, eos=1, unk=2).
        self.special_tokens = {
            'pad': {'id': 0, 'token': pad_token},
            'eos': {'id': 1, 'token': eos_token},
            'unk': {'id': 2, 'token': unk_token},
        }
        # Flat list indexed by token id, consumed by the trainers below.
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict['id']] = token_dict['token']

        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(' {2,}'), ' '),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        # Append the EOS token to every encoded sequence.
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])],
        )
        parameters = {
            'model': 'SentencePieceUnigram',
            'replacement': replacement,
            'add_prefix_space': add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the Unigram model from one or more text files on disk."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the Unigram model from an in-memory iterator of texts."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        """Patch the serialized model so its ``unk_id`` points at our unk token.

        The original body was truncated mid-method; the final re-assignment to
        ``self._tokenizer`` is restored from the surrounding reads.
        """
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json['model']['unk_id'] = self.special_tokens['unk']['id']
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 53 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
# NOTE(review): this throwaway assignment looks like a mangled
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" (silence TensorFlow C++ logging) —
# confirm against the original script. Annotation changed from the un-imported
# ``Any`` (which would NameError at import) to the accurate builtin ``str``.
_UpperCamelCase: str = '3'

print('Python version:', sys.version)
print('transformers version:', transformers.__version__)

# PyTorch / CUDA details (optional dependency — report None when missing).
try:
    import torch

    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
    print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
    print('Torch version:', None)

# DeepSpeed (optional dependency).
try:
    import deepspeed

    print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
    print('DeepSpeed version:', None)

# TensorFlow (optional dependency).
try:
    import tensorflow as tf

    print('TensorFlow version:', tf.__version__)
    print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
    print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
    print('TensorFlow version:', None)
| 53 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffolding for the Perceiver subpackage: heavy submodules are
# only imported on first attribute access via _LazyModule. The original bound
# every structure to the same throwaway name and then read the undefined
# ``_import_structure`` at the bottom; names restored from that read and the
# mirrored TYPE_CHECKING imports.
_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_perceiver'] = [
        'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PerceiverForImageClassificationConvProcessing',
        'PerceiverForImageClassificationFourier',
        'PerceiverForImageClassificationLearned',
        'PerceiverForMaskedLM',
        'PerceiverForMultimodalAutoencoding',
        'PerceiverForOpticalFlow',
        'PerceiverForSequenceClassification',
        'PerceiverLayer',
        'PerceiverModel',
        'PerceiverPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Reference passage fed to the question-answering tool in the tests below.
# Annotation changed from the un-imported ``Dict`` (would NameError at import)
# to the accurate builtin ``str``.
__A : str = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class _UpperCAmelCase ( unittest.TestCase , _A ):
    """Tests for the ``text-question-answering`` tool, locally and remotely.

    NOTE(review): this block appears machine-mangled and will not run as-is:
    the setup binds locals (``lowercase_``) but the tests read ``self.tool`` /
    ``self.remote_tool``; the first positional argument ``A`` does not resolve
    to the module-level text constant (its original two-underscore name would
    be name-mangled inside a class body); and the mixin base ``_A`` is not
    defined in this file. Restore ``self.tool = load_tool(...)`` and a
    non-mangled constant name before relying on these tests.
    """

    def A ( self : List[Any] ) -> Dict:
        # Intended as setUp: build the local and remote tool instances.
        lowercase_ : Optional[int] = load_tool('''text-question-answering''' )
        self.tool.setup()
        lowercase_ : Union[str, Any] = load_tool('''text-question-answering''' , remote=A )

    def A ( self : Any ) -> List[str]:
        # Positional-arguments call against the local tool.
        lowercase_ : Union[str, Any] = self.tool(A , '''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )

    def A ( self : str ) -> List[str]:
        # Positional-arguments call against the remote tool.
        lowercase_ : int = self.remote_tool(A , '''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )

    def A ( self : List[Any] ) -> int:
        # Keyword-arguments call against the local tool.
        lowercase_ : Optional[Any] = self.tool(text=A , question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )

    def A ( self : List[str] ) -> Optional[int]:
        # Keyword-arguments call against the remote tool.
        lowercase_ : int = self.remote_tool(text=A , question='''What did Hugging Face do in April 2021?''' )
        self.assertEqual(A , '''launched the BigScience Research Workshop''' )
| 33 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __lowerCAmelCase ( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
    """Launch ``function(*args)`` on TPU cores, multiple GPUs, one GPU, MPS or
    CPU, choosing the strategy from the runtime environment (Colab/Kaggle aware).

    Parameters reconstructed from their reads: the original signature repeated
    one mangled name five times, which is a SyntaxError.
    """
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        # Fixed: the original formatted ``args.mixed_precision`` — ``args`` is a
        # tuple here, so the error path itself raised AttributeError.
        raise ValueError(
            f'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.'
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f'Launching a training on {num_processes} TPU cores.')
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f'Launching training on {num_processes} GPUs.')
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                # NOTE(review): the original bound "1" to a throwaway name; upstream
                # accelerate enables the MPS fallback here — confirm against source.
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def __lowerCAmelCase ( function , args=() , num_processes=2 ):
    """Fork ``num_processes`` CPU workers running ``function(*args)`` for
    debugging distributed code on a single machine.

    Parameters reconstructed from their reads (the mangled signature repeated
    one name three times — a SyntaxError).
    """
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 20 | import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
# NOTE(review): pytest will not collect a test whose name starts with an
# underscore; restore the original name (e.g. ``test_base_extractors``) before
# running. Fixture parameter names reconstructed from the dict reads below —
# the mangled signature repeated one name twelve times (a SyntaxError).
def __lowerCAmelCase (
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    text_file,
    tmp_path,
):
    """Each concrete extractor round-trips its own archive/compressed file."""
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bza_file, BzipaExtractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lza_file, LzaExtractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        # The fixture is None when the optional backend is missing: skip.
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
    "compression_format, is_archive",
    [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ],
)
# NOTE(review): same caveats as the test above — restore a ``test_``-prefixed
# name for pytest collection; fixture names reconstructed from the dict reads.
def __lowerCAmelCase (
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    text_file,
    tmp_path,
):
    """The generic Extractor infers the right format and extracts correctly."""
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bza_file,
        "gzip": gz_file,
        "lz4": lza_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
# NOTE(review): fixture name mangled — the test below requests it as
# ``tar_file_with_dot_dot``; restore that name for pytest to resolve it.
# Parameters reconstructed from the body reads (the mangled signature
# duplicated one name — a SyntaxError).
def __lowerCAmelCase ( tmp_path , text_file ):
    """A tar whose member path escapes the extraction dir via ``..``."""
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
# NOTE(review): fixture name mangled — the test below requests it as
# ``tar_file_with_sym_link``; restore that name for pytest to resolve it.
def __lowerCAmelCase ( tmp_path ):
    """A tar containing a symlink member pointing outside the archive."""
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
# NOTE(review): restore a ``test_``-prefixed name for pytest collection.
# Parameters reconstructed from the body reads (mangled duplicates were a
# SyntaxError).
def __lowerCAmelCase ( insecure_tar_file , error_log , tar_file_with_dot_dot , tar_file_with_sym_link , tmp_path , caplog ):
    """Extracting a malicious tar logs an ERROR instead of writing outside."""
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
# NOTE(review): restore a ``test_``-prefixed name for pytest collection; the
# parameter must be ``tmpdir`` (read in the body) for the fixture to resolve.
def __lowerCAmelCase ( tmpdir ):
    """A PNG embedding a ZIP end-of-central-directory record fools
    ``zipfile.is_zipfile`` but not our magic-number check."""
    # We should have less false positives than zipfile.is_zipfile
    # We do that by checking only the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
| 20 | 1 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a(ProcessorMixin):
    """CLIP processor: bundles a CLIP image processor and a CLIP tokenizer
    behind one callable (mirrors `transformers.CLIPProcessor`)."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Accepts the deprecated `feature_extractor` kwarg as a fallback for
        `image_processor`; both an image processor and a tokenizer are required."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; when both are given the
        pixel values are merged into the tokenizer's encoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, de-duplicated, order kept.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 34 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger; PIL is only needed when vision support is installed.
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL
class _lowerCamelCase(BaseImageProcessor):
    r"""
    CLIP image processor: optional shortest-edge resize, center crop,
    rescale to [0, 1], channel normalization and RGB conversion.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # `size` is a shortest-edge spec, so do not force it square.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline over one image or a batch; every flag
        falls back to the value chosen at construction time."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 186 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's command line: `--num_cores`, the training script
    path, and everything after it (passed through to the script)."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()
def main():
    """Import the training script as a module and spawn it on the TPU cores."""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the spawned module sees its own arguments plus the
    # core count expected by TPU-aware training scripts.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 371 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU smoke tests for the Kandinsky 2.2 image-to-image pipeline
    using tiny randomly-initialized sub-models."""

    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        # First UNet block width, tied to the time-embedding input size.
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny deterministic UNet2DConditionModel."""
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        """A tiny deterministic MoVQ (VQModel) decoder."""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the pipeline components with a DDIM scheduler."""
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Seeded embeddings, a 256x256 RGB init image, and call kwargs."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        # `mps` does not support device-bound generators.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        """End-to-end CPU run; output shape and a corner slice are pinned."""
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyVaaImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against a stored reference image."""

    def tearDown(self):
        # Free VRAM between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 46 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger, used by the config's classmethod below.
logger = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Composite configuration holding one encoder config and one decoder
    config (mirrors `transformers.EncoderDecoderConfig`)."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        # Imported lazily to avoid a circular import with the auto mapping.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        """Build a composite config, forcing the decoder into decoder mode
        with cross-attention enabled."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 238 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE(ProcessorMixin):
    """BLIP-style processor wrapping a BLIP image processor and a tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        # BLIP tokenizers must not emit token_type_ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Preprocess `images` and/or tokenize `text`; when both are present
        the token encoding is merged into the image encoding."""
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 238 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( UpperCamelCase__ : Dict, UpperCamelCase__ : bool = True, UpperCamelCase__ : float = math.inf, UpperCamelCase__ : float = -math.inf, UpperCamelCase__ : float = math.inf, UpperCamelCase__ : float = -math.inf, UpperCamelCase__ : bool = False, UpperCamelCase__ : float = 100, UpperCamelCase__ : float = 0.01, UpperCamelCase__ : float = 1, ):
'''simple docstring'''
UpperCamelCase__ = False
UpperCamelCase__ = search_prob
UpperCamelCase__ = start_temperate
UpperCamelCase__ = []
UpperCamelCase__ = 0
UpperCamelCase__ = None
while not search_end:
UpperCamelCase__ = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCamelCase__ = current_state
scores.append(UpperCamelCase__ )
iterations += 1
UpperCamelCase__ = None
UpperCamelCase__ = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCamelCase__ = random.randint(0, len(UpperCamelCase__ ) - 1 ) # picking a random neighbor
UpperCamelCase__ = neighbors.pop(UpperCamelCase__ )
UpperCamelCase__ = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCamelCase__ = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCamelCase__ = picked_neighbor
else:
UpperCamelCase__ = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCamelCase__ = picked_neighbor
UpperCamelCase__ = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCamelCase__ = True
else:
UpperCamelCase__ = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(UpperCamelCase__ ), UpperCamelCase__ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( UpperCamelCase__ : str, UpperCamelCase__ : Tuple ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowercase = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowercase = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
lowercase = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowercase = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : int ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowercase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f'{local_min.score()}'
)
lowercase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
f'{local_min.score()}'
)
| 35 | from __future__ import annotations
# Type alias for a 9x9 sudoku board.
Matrix = list[list[int]]

# assigning initial values to the grid (0 marks an empty cell)
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: list[list[int]], row: int, column: int, n: int) -> bool:
    """Return True if digit `n` may be placed at grid[row][column]: it must
    not already appear in that row, that column, or the cell's 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    # (row - row % 3, column - column % 3) is the top-left corner of the box.
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def find_empty_location(grid: list[list[int]]):
    """Return the (row, column) of the first empty cell (value 0), scanning
    row-major; return None when the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: list[list[int]]):
    """Solve the grid in place by backtracking; return the solved grid, or
    None when no assignment of digits satisfies the constraints."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Undo the tentative placement before trying the next digit.
            grid[row][column] = 0

    return None
def print_solution(grid: list[list[int]]) -> None:
    """Print the grid one row per line, cells separated by single spaces."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 2_0)
print_solution(example_grid)
print("""\nExample grid solution:""")
lowercase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 35 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys

import transformers


# Silence TensorFlow's C++ startup logging before TF is imported below.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
| 2 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-level logger (obfuscated name; conventionally bound to `logger`).
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def squared_euclidean_distance(a, b) -> np.ndarray:
    """Return the matrix of pairwise squared Euclidean distances.

    ``a`` is (n, d) and ``b`` is (m, d); the result is (n, m) with
    ``d[i, j] = ||a[i] - b[j]||**2``, computed via the expansion
    ``|a|^2 - 2 a.b + |b|^2``.

    Fixes: the original signature declared two parameters with the same name
    (a SyntaxError) and the body referenced undefined locals (`b`, `aa`,
    `ab`, `ba`); it is also called as `squared_euclidean_distance` below.
    """
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters) -> np.ndarray:
    """Map every pixel of ``x`` to the index of its nearest colour cluster.

    ``x`` is reshaped to (num_pixels, 3); returns a 1-D array of palette
    indices into ``clusters``.

    Fixes: duplicate parameter names (SyntaxError) and undefined locals in the
    original; the preprocessing code below calls this as `color_quantize`.
    """
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class lowerCamelCase__ ( A ):
    """Image processor for ImageGPT.

    Optionally resizes images, rescales pixel values to [-1, 1] and
    colour-quantizes them against a palette of colour ``clusters``,
    producing one palette index per pixel under the ``input_ids`` key.
    """

    # HF processors conventionally expose this as ``model_input_names``
    # (the original attribute name was obfuscated).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        """Store default preprocessing settings; ``clusters`` is the colour palette."""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        # Fix: the original __init__ bound every value to a throwaway local,
        # so no instance attribute was ever set.
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (a dict with ``height`` and ``width`` keys)."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''')
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Map pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a batch of images.

        Any argument left as ``None`` falls back to the instance default set
        in ``__init__``. Returns a ``BatchFeature`` whose ``input_ids`` holds
        palette indices (if ``do_color_quantize``) or channel-formatted arrays.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # Fix: was `do_resize and size is None or resample is None`, which (by
        # precedence) raised whenever resample was None even with do_resize off.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        # Fix: `clusters` used to be wrapped in np.array() *before* this check,
        # so `clusters is None` could never be True.
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            clusters = np.array(clusters)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 115 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """Parse ``TensorFlowBenchmarkArguments`` from the CLI and run the TF benchmark.

    On a parsing ``ValueError``, translate messages about removed ``--no_*``
    flags into a clearer error before re-raising.

    Fixes: the original referenced an undefined ``a_`` everywhere and the
    ``__main__`` guard called ``main()`` although the def had another name.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() on text pulled out of an exception message is
        # fragile; kept because the message is produced locally by HfArgumentParser.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__(SchedulerCommonTest):
    """Unit tests for ``EulerDiscreteScheduler``.

    Fixes: the base class, both class attributes (read below as
    ``self.scheduler_classes`` / ``self.num_inference_steps``) and every
    method name had been garbled to one shared identifier, and locals were
    bound to names that were never read (e.g. ``torch_device`` references).
    Method names follow the upstream diffusers test file.
    """

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via ``kwargs``."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run a full denoising loop and check the deterministic output stats."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same loop with ``prediction_type='v_prediction'``."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed on ``torch_device``."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on device using the Karras sigma schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
# Lazy import structure: module name -> public names it exports.
# Fixes: `_import_structure` was passed to `_LazyModule` but never defined —
# the dict was bound to a garbled name and then overwritten by two bare lists.
# The `import sys` in the else-branch shows the original installed the lazy
# module via `sys.modules[__name__]`, restored below.
_import_structure = {
    'configuration_audio_spectrogram_transformer': [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ASTConfig',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ASTForAudioClassification',
        'ASTModel',
        'ASTPreTrainedModel',
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 107 |
def SCREAMING_SNAKE_CASE__(discount_rate: float, cash_flows: list) -> float:
    """Return the net present value of ``cash_flows`` at ``discount_rate``.

    The i-th cash flow is discounted by ``(1 + discount_rate) ** i`` (the
    first flow is undiscounted). Result is rounded to 2 decimal places.

    Raises:
        ValueError: if ``discount_rate`` is negative or ``cash_flows`` is empty.

    Fixes: the original declared two parameters with the same name (a
    SyntaxError) and the body read undefined ``discount_rate``/``cash_flows``.
    """
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 73 | 0 |
'''simple docstring'''
# Notebook-conversion config (Italian docs). Fixes: `INSTALL_CONTENT` was
# referenced one line below but the constant had been bound to a garbled name.
# The two following names match the hf-doc-builder `_config.py` convention.
INSTALL_CONTENT = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

# Cells prepended to every generated notebook.
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]

# Doc templates the code formatter must leave untouched.
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 287 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Fixture paths + framework flag. Fixes: all three constants were bound to one
# garbled name (each assignment overwrote the previous) and the class below
# referenced names that no longer existed.
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')

FRAMEWORK = 'pt' if is_torch_available() else 'tf'


@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for CamembertTokenizer / CamembertTokenizerFast.

    Fixes: the mixin base was garbled, the four class attributes collapsed
    into one name (though ``self.test_rust_tokenizer`` is read below), every
    method shared one def name, and locals were assigned under one identifier
    but read under another. Names follow the common tokenizer test layout.
    """

    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """``<pad>`` maps to id 1 and back."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>NOTUSED')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        """Slow (BPE fixture) and fast tokenizers must agree on ids/tokens."""
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = 'I was born in 92000, and this is falsé.'

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='camembert-base',
            revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf',
            sequences=sequences,
        )
| 287 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for BLIP-2. Fixes: `_import_structure` was passed to
# `_LazyModule` but never defined (dict bound to a garbled name and then
# overwritten by a list); the TYPE_CHECKING branch imported `Blipa*` from
# `configuration_blip_a` although the export strings declare `Blip2*` in
# `*_blip_2` modules; `import sys` shows the lazy module is installed via
# `sys.modules[__name__]`, restored below.
_import_structure = {
    'configuration_blip_2': [
        'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Blip2Config',
        'Blip2QFormerConfig',
        'Blip2VisionConfig',
    ],
    'processing_blip_2': ['Blip2Processor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_blip_2'] = [
        'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Blip2Model',
        'Blip2QFormerModel',
        'Blip2PreTrainedModel',
        'Blip2ForConditionalGeneration',
        'Blip2VisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 124 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
lowerCamelCase : List[Any] = 1_0
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search of ``array[left:right]``; return the index of ``target`` or -1.

    Fixes: duplicate parameter names (SyntaxError) and a def name that did not
    match the `lin_search(...)` call sites in the ternary-search functions.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted ``array``; return index of ``target`` or -1.

    Narrows the interval to thirds until it is smaller than the module-level
    ``precision``, then finishes with a linear scan.

    Fixes: duplicate parameter names (SyntaxError), def-name clash with the
    other search functions, and locals bound to names that were never read.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            # Target lies strictly between the two probe points.
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on sorted ``array[left:right]``; -1 if absent.

    Falls back to ``lin_search`` once the interval is smaller than the
    module-level ``precision``.

    Fixes: duplicate parameter names (SyntaxError) and the def-name clash with
    the other search functions (the `__main__` block calls `rec_ternary_search`).
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest
    doctest.testmod()

    # Fixes: every value below was bound to a garbled throwaway name while the
    # following lines read `user_input`, `collection`, `target` and `resulta`
    # (all undefined). Restored with descriptive names.
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result_iterative = ite_ternary_search(collection, target)
    result_recursive = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_iterative != -1:
        print(f"""Iterative search: {target} found at positions: {result_iterative}""")
        print(f"""Recursive search: {target} found at positions: {result_recursive}""")
    else:
        print('Not found')
| 124 | 1 |
def factorial(num: int) -> int:
    """Return ``num!`` (iteratively); ``factorial(0) == 1``.

    Fix: the def was named identically to its two siblings (each shadowing the
    previous), while `solution` below calls it as `factorial`.
    """
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of a non-negative ``number``.

    Fix: the def shared its name with the sibling functions; `solution` below
    calls it as `split_and_add`.
    """
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100) -> int:
    """Project Euler style: return the digit sum of ``num!`` (default 100).

    Fix: the def shared its name with `factorial`/`split_and_add` (so only the
    last definition survived) while the `__main__` guard calls `solution`.
    """
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
    # Prompt for a number and print the digit sum of its factorial.
    print(solution(int(input('''Enter the Number: ''').strip())))
| 365 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for X-CLIP. Fixes: `_import_structure` was passed to
# `_LazyModule` but never defined — the dict was bound to a garbled name and
# then overwritten by a bare list; `import sys` shows the lazy module is
# installed via `sys.modules[__name__]`, restored below.
_import_structure = {
    '''configuration_x_clip''': [
        '''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XCLIPConfig''',
        '''XCLIPTextConfig''',
        '''XCLIPVisionConfig''',
    ],
    '''processing_x_clip''': ['''XCLIPProcessor'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_x_clip'''] = [
        '''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XCLIPModel''',
        '''XCLIPPreTrainedModel''',
        '''XCLIPTextModel''',
        '''XCLIPVisionModel''',
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 185 | 0 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _a ( SCREAMING_SNAKE_CASE : int = "laptop" ):
    """Scrape Amazon.in search results for ``SCREAMING_SNAKE_CASE`` (a product
    query string) and return a pandas DataFrame of title, link, price, rating,
    MRP and discount percentage.

    NOTE(review): this function is heavily name-garbled — every local is bound
    to ``UpperCamelCase__`` while later lines read ``soup``, ``data_frame``,
    ``product_title`` etc., and the f-string below reads an undefined
    ``product``. The ``__main__`` guard also calls it as
    ``get_amazon_product_data``. Flagging rather than rewriting: the tail of
    the loop (the two ``" "`` assignments) cannot be reconstructed with
    confidence from this view.
    """
    # NOTE(review): reads undefined `product`; presumably the query URL for the
    # `SCREAMING_SNAKE_CASE` parameter — confirm against upstream.
    UpperCamelCase__ : Union[str, Any] = F"https://www.amazon.in/laptop/s?k={product}"
    # Browser-like headers so Amazon serves the regular HTML page.
    UpperCamelCase__ : Optional[int] = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    UpperCamelCase__ : List[Any] = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE , headers=SCREAMING_SNAKE_CASE ).text )
    # Initialize a Pandas dataframe with the column titles
    UpperCamelCase__ : Union[str, Any] = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
        try:
            # NOTE(review): `item.ha` looks like garbled `item.h2` — confirm.
            UpperCamelCase__ : int = item.ha.text
            UpperCamelCase__ : List[Any] = "https://www.amazon.in/" + item.ha.a["href"]
            UpperCamelCase__ : List[Any] = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
            try:
                UpperCamelCase__ : Tuple = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
            except AttributeError:
                # No rating element for this item.
                UpperCamelCase__ : Any = "Not available"
            try:
                UpperCamelCase__ : Any = (
                    "₹"
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
                )
            except AttributeError:
                UpperCamelCase__ : int = ""
            try:
                # Discount percentage = (MRP - price) / MRP * 100.
                UpperCamelCase__ : str = float(
                    (
                        (
                            float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                            - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
                        )
                        / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                    )
                    * 100 )
            except ValueError:
                UpperCamelCase__ : Union[str, Any] = float('''nan''' )
        except AttributeError:
            pass
            # NOTE(review): the row-append below was garbled into dead
            # assignments; upstream stores the row in the DataFrame here.
            UpperCamelCase__ : Union[str, Any] = [
                product_title,
                product_link,
                product_price,
                product_rating,
                product_mrp,
                discount,
            ]
            UpperCamelCase__ : Optional[Any] = " "
            UpperCamelCase__ : str = " "
            data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # Fix: the query was bound to a garbled name while the call and the CSV
    # filename f-string read `product`.
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 146 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
# Shared RNG used by floats_list when no explicit `rng` is given.
# Fix: was bound to a garbled name although read below as `global_rng`.
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D ``shape``.

    ``rng`` defaults to the module-level ``global_rng``; each value is
    ``rng.random() * scale``. ``name`` is unused (kept for call compatibility).

    Fixes: the original declared four parameters all named the same (a
    SyntaxError), bound the ``global_rng`` fallback to a throwaway local so
    ``rng`` stayed None, and was named `_A` although call sites in the test
    helpers use `floats_list`.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    """Holds WhisperFeatureExtractor settings and builds dummy speech inputs.

    Fixes: the class name was garbled although it is instantiated as
    ``WhisperFeatureExtractionTester`` below; ``__init__`` declared eleven
    parameters all sharing one name (a SyntaxError); and both helper methods
    shared one def name, so the first was shadowed.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive sample lengths so a batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        """Return the kwargs used to construct a WhisperFeatureExtractor."""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of dummy float speech inputs (lists or numpy arrays)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class a__ ( lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Tuple = WhisperFeatureExtractor if is_speech_available() else None
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = WhisperFeatureExtractionTester(self )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : List[Any] = feat_extract_first.save_pretrained(_UpperCamelCase )[0]
check_json_file_has_correct_format(_UpperCamelCase )
_lowercase : Tuple = self.feature_extraction_class.from_pretrained(_UpperCamelCase )
_lowercase : List[Any] = feat_extract_first.to_dict()
_lowercase : List[str] = feat_extract_second.to_dict()
_lowercase : Tuple = feat_extract_first.mel_filters
_lowercase : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase : Optional[int] = os.path.join(_UpperCamelCase , "feat_extract.json" )
feat_extract_first.to_json_file(_UpperCamelCase )
_lowercase : Any = self.feature_extraction_class.from_json_file(_UpperCamelCase )
_lowercase : List[Any] = feat_extract_first.to_dict()
_lowercase : str = feat_extract_second.to_dict()
_lowercase : List[str] = feat_extract_first.mel_filters
_lowercase : Optional[Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowercase : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowercase : Optional[Any] = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
# Test feature size
_lowercase : int = feature_extractor(_UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_lowercase : List[str] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_lowercase : str = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) )
# Test batched
_lowercase : Dict = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
_lowercase : Optional[Any] = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_lowercase : Optional[int] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowercase : List[str] = np.asarray(_UpperCamelCase )
_lowercase : Optional[Any] = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
_lowercase : str = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) )
# Test truncation required
_lowercase : List[Any] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_lowercase : List[str] = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs]
_lowercase : Any = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowercase : Any = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs_truncated]
_lowercase : List[str] = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
_lowercase : Union[str, Any] = feature_extractor(_UpperCamelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
import torch
_lowercase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowercase : Optional[Any] = np.random.rand(100 , 32 ).astype(np.floataa )
_lowercase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowercase : Optional[int] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowercase : Optional[int] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
_lowercase : int = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_lowercase : Optional[int] = ds.sort("id" ).select(range(_UpperCamelCase ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : str = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_lowercase : str = self._load_datasamples(1 )
_lowercase : Union[str, Any] = WhisperFeatureExtractor()
_lowercase : Any = feature_extractor(_UpperCamelCase , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , _UpperCamelCase , atol=1E-4 ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowercase : str = self._load_datasamples(1 )[0]
_lowercase : List[str] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
_lowercase : Optional[int] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_UpperCamelCase )[0]
self.assertTrue(np.all(np.mean(_UpperCamelCase ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_UpperCamelCase ) - 1 ) < 1E-3 ) )
# | 250 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = math.inf , lowerCAmelCase__ = -math.inf , lowerCAmelCase__ = math.inf , lowerCAmelCase__ = -math.inf , lowerCAmelCase__ = False , lowerCAmelCase__ = 100 , lowerCAmelCase__ = 0.01 , lowerCAmelCase__ = 1 , ):
UpperCAmelCase_ = False
UpperCAmelCase_ = search_prob
UpperCAmelCase_ = start_temperate
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = None
while not search_end:
UpperCAmelCase_ = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase_ = current_state
scores.append(A_ )
iterations += 1
UpperCAmelCase_ = None
UpperCAmelCase_ = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCAmelCase_ = random.randint(0 , len(A_ ) - 1 ) # picking a random neighbor
UpperCAmelCase_ = neighbors.pop(A_ )
UpperCAmelCase_ = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase_ = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase_ = picked_neighbor
else:
UpperCAmelCase_ = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCAmelCase_ = picked_neighbor
UpperCAmelCase_ = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase_ = True
else:
UpperCAmelCase_ = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(A_ ) , A_ )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCamelCase = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
# starting the problem with initial coordinates (12, 47)
lowerCamelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCamelCase = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return (3 * x**2) - (6 * y)
lowerCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCamelCase = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"{local_min.score()}"
)
lowerCamelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCamelCase = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"{local_min.score()}"
)
# | 358 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : str ) -> int:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Union[str, Any] , *_UpperCAmelCase : Any , **_UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : str , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : int ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[str] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Union[str, Any] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : Dict , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Any , *_UpperCAmelCase : str , **_UpperCAmelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : str , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : List[str] ) -> str:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : int , *_UpperCAmelCase : Any , **_UpperCAmelCase : int ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : str , *_UpperCAmelCase : int , **_UpperCAmelCase : Dict ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Optional[Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Dict , *_UpperCAmelCase : Any , **_UpperCAmelCase : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[str] , *_UpperCAmelCase : str , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Union[str, Any] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : str , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Optional[Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : List[Any] ) -> str:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : str ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : List[str] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Union[str, Any] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : Optional[int] , *_UpperCAmelCase : Any , **_UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Optional[Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Optional[int] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[str] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Tuple , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Any , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : List[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : int ) -> int:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : Tuple , *_UpperCAmelCase : int , **_UpperCAmelCase : Any ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : str , *_UpperCAmelCase : int , **_UpperCAmelCase : str ) -> Any:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : int , *_UpperCAmelCase : Dict , **_UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
class lowercase__ ( metaclass=SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = ['''flax''']
def __init__( self : int , *_UpperCAmelCase : Any , **_UpperCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["flax"] )
@classmethod
def lowercase__ ( cls : Optional[int] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Dict ) -> Dict:
'''simple docstring'''
requires_backends(cls , ["flax"] )
@classmethod
def lowercase__ ( cls : str , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"] )
# | 241 | 0 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase):
_UpperCamelCase:List[Any] = PhobertTokenizer
_UpperCamelCase:int = False
def _snake_case ( self )-> Tuple:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ =["""T@@""", """i""", """I""", """R@@""", """r""", """e@@"""]
lowerCamelCase_ =dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
lowerCamelCase_ =["""#version: 0.2""", """l à</w>"""]
lowerCamelCase_ ={"""unk_token""": """<unk>"""}
lowerCamelCase_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase_ =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f'{token} {vocab_tokens[token]}\n' )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )-> List[Any]:
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> List[str]:
lowerCamelCase_ ="""Tôi là VinAI Research"""
lowerCamelCase_ ="""T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"""
return input_text, output_text
def _snake_case ( self )-> Optional[int]:
lowerCamelCase_ =PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase_ ="""Tôi là VinAI Research"""
lowerCamelCase_ ="""T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h""".split()
lowerCamelCase_ =tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
print(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCamelCase_ =tokens + [tokenizer.unk_token]
lowerCamelCase_ =[4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# | 154 |
from collections import namedtuple
__A : List[str] = namedtuple('from_to', 'from_ to')
__A : int = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.0_01, 10_00),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_04_54, 2_64.1_72),
'cubicyard': from_to(0.7_64_55, 1.3_07_95),
'cubicfoot': from_to(0.0_28, 35.31_47),
'cup': from_to(0.0_00_23_65_88, 42_26.75),
}
def __UpperCamelCase ( _A : float , _A : str , _A : str ) ->float:
"""simple docstring"""
if from_type not in METRIC_CONVERSION:
raise ValueError(
f'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
+ """, """.join(_A ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
+ """, """.join(_A ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
# | 154 | 1 |
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase :int = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
lowerCAmelCase :Optional[Any] = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def lowerCamelCase ( lowerCAmelCase : list[float] ):
"""simple docstring"""
__magic_name__ : Optional[int] = []
__magic_name__ : List[str] = len(snake_case__ )
for i in range(snake_case__ ):
__magic_name__ : List[str] = -1
for j in range(i + 1 , snake_case__ ):
if arr[i] < arr[j]:
__magic_name__ : Any = arr[j]
break
result.append(snake_case__ )
return result
def lowerCamelCase ( lowerCAmelCase : list[float] ):
"""simple docstring"""
__magic_name__ : Optional[Any] = []
for i, outer in enumerate(snake_case__ ):
__magic_name__ : int = -1
for inner in arr[i + 1 :]:
if outer < inner:
__magic_name__ : Union[str, Any] = inner
break
result.append(snake_case__ )
return result
def lowerCamelCase ( lowerCAmelCase : list[float] ):
"""simple docstring"""
__magic_name__ : str = len(snake_case__ )
__magic_name__ : Dict = []
__magic_name__ : Optional[int] = [-1] * arr_size
for index in reversed(range(snake_case__ ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
__magic_name__ : List[str] = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCAmelCase :Optional[int] = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
) | 365 |
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : str ):
"""simple docstring"""
return "".join(chr(ord(lowerCAmelCase ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod() | 275 | 0 |
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int ) -> Optional[Any]:
UpperCAmelCase : Dict = old_name
if "patch_embed" in old_name:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = old_name.split('''.''' )
if layer == "0":
UpperCAmelCase : Dict = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
UpperCAmelCase : List[Any] = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
UpperCAmelCase : Any = old_name.replace('''3''' , '''convolution2''' )
else:
UpperCAmelCase : Tuple = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCAmelCase ):
UpperCAmelCase : int = R'''\b\d{2}\b'''
if bool(re.search(_lowerCAmelCase , _lowerCAmelCase ) ):
UpperCAmelCase : Optional[int] = re.search(R'''\d\.\d\d.''' , _lowerCAmelCase ).group()
else:
UpperCAmelCase : str = re.search(R'''\d\.\d.''' , _lowerCAmelCase ).group()
if int(match[0] ) < 6:
UpperCAmelCase : int = old_name.replace(_lowerCAmelCase , '''''' )
UpperCAmelCase : int = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
UpperCAmelCase : int = '''intermediate_stages.''' + trimmed_name
else:
UpperCAmelCase : str = old_name.replace(_lowerCAmelCase , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
UpperCAmelCase : Optional[int] = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
UpperCAmelCase : Any = str(int(match[2] ) - num_meta4D_last_stage )
UpperCAmelCase : str = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
UpperCAmelCase : str = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
UpperCAmelCase : Union[str, Any] = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
UpperCAmelCase : Any = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
UpperCAmelCase : List[str] = trimmed_name.replace('''fc2''' , '''linear_out''' )
UpperCAmelCase : Tuple = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _lowerCAmelCase ):
UpperCAmelCase : Any = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
UpperCAmelCase : Dict = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
UpperCAmelCase : Union[str, Any] = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
UpperCAmelCase : Dict = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
UpperCAmelCase : Optional[Any] = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
UpperCAmelCase : List[str] = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
UpperCAmelCase : List[str] = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
UpperCAmelCase : Tuple = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
UpperCAmelCase : List[str] = new_name.replace('''norm''' , '''layernorm''' )
UpperCAmelCase : Optional[Any] = '''efficientformer.''' + new_name
else:
UpperCAmelCase : Dict = '''efficientformer.encoder.''' + new_name
return new_name
def snake_case_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ) -> List[Any]:
for key in checkpoint.copy().keys():
UpperCAmelCase : str = checkpoint.pop(_lowerCAmelCase )
UpperCAmelCase : Tuple = val
return checkpoint
def snake_case_ ( ) -> Dict:
UpperCAmelCase : List[str] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase : Any = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return image
def snake_case_ ( _lowerCAmelCase : Path , _lowerCAmelCase : Path , _lowerCAmelCase : Path , _lowerCAmelCase : bool ) -> Any:
UpperCAmelCase : Tuple = torch.load(_lowerCAmelCase , map_location='''cpu''' )['''model''']
UpperCAmelCase : Optional[Any] = EfficientFormerConfig.from_json_file(_lowerCAmelCase )
UpperCAmelCase : List[Any] = EfficientFormerForImageClassificationWithTeacher(_lowerCAmelCase )
UpperCAmelCase : List[Any] = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
UpperCAmelCase : int = config.depths[-1] - config.num_metaad_blocks + 1
UpperCAmelCase : int = convert_torch_checkpoint(_lowerCAmelCase , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
UpperCAmelCase : Any = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : int = 256
UpperCAmelCase : Optional[Any] = 224
UpperCAmelCase : List[str] = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
UpperCAmelCase : Optional[Any] = processor(images=_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
UpperCAmelCase : Optional[Any] = Compose(
[
Resize(_lowerCAmelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCAmelCase ),
ToTensor(),
Normalize(_lowerCAmelCase , _lowerCAmelCase ),
] )
UpperCAmelCase : Dict = image_transforms(_lowerCAmelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase : Optional[int] = model(_lowerCAmelCase )
UpperCAmelCase : List[str] = outputs.logits
UpperCAmelCase : str = (1, 1000)
if "l1" in model_name:
UpperCAmelCase : Union[str, Any] = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , _lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
UpperCAmelCase : int = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , _lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
UpperCAmelCase : Tuple = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(_lowerCAmelCase )
print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add model''' , use_temp_dir=_lowerCAmelCase , )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='''Add image processor''' , use_temp_dir=_lowerCAmelCase , )
if __name__ == "__main__":
UpperCamelCase__: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
UpperCamelCase__: Tuple = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
# | 23 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter (Audio EQ Cookbook formulas).

    :param frequency: cutoff frequency in Hz
    :param samplerate: sample rate in Hz
    :param q_factor: quality factor (defaults to Butterworth, 1/sqrt(2))
    :return: configured second-order IIRFilter
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos  # b2 == b0 for the low-pass form

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter (Audio EQ Cookbook formulas).

    :param frequency: cutoff frequency in Hz
    :param samplerate: sample rate in Hz
    :param q_factor: quality factor (defaults to Butterworth, 1/sqrt(2))
    :return: configured second-order IIRFilter
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos  # b2 == b0 for the high-pass form

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter (Audio EQ Cookbook formulas).

    :param frequency: center frequency in Hz
    :param samplerate: sample rate in Hz
    :param q_factor: quality factor (defaults to 1/sqrt(2))
    :return: configured second-order IIRFilter
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter (Audio EQ Cookbook formulas).

    :param frequency: center frequency in Hz
    :param samplerate: sample rate in Hz
    :param q_factor: quality factor (defaults to 1/sqrt(2))
    :return: configured second-order IIRFilter
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    # For the all-pass form the a-coefficients are the b-coefficients reversed.
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad filter (Audio EQ Cookbook formulas).

    :param frequency: center frequency in Hz
    :param samplerate: sample rate in Hz
    :param gain_db: peak gain in decibels
    :param q_factor: quality factor (defaults to 1/sqrt(2))
    :return: configured second-order IIRFilter
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad filter (Audio EQ Cookbook formulas).

    :param frequency: shelf transition frequency in Hz
    :param samplerate: sample rate in Hz
    :param gain_db: shelf gain in decibels
    :param q_factor: quality factor (defaults to 1/sqrt(2))
    :return: configured second-order IIRFilter
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain
    # Common sub-expressions of the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad filter (Audio EQ Cookbook formulas).

    :param frequency: shelf transition frequency in Hz
    :param samplerate: sample rate in Hz
    :param gain_db: shelf gain in decibels
    :param q_factor: quality factor (defaults to 1/sqrt(2))
    :return: configured second-order IIRFilter
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB gain
    # Common sub-expressions of the shelf formulas.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 23 | 1 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Lazy-import structure for the onnx package: maps submodule -> exported names.
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the module is lazy.
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 94 |
import math
import string

import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Return the greatest common divisor of *a* and *b* (Euclid's algorithm).

    >>> greatest_common_divisor(4, 8)
    4
    """
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    """Hill cipher over the 36-symbol alphabet A-Z0-9, keyed by an NxN matrix."""

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    # elementwise rounding used to turn the float inverse key back into ints
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        """encrypt_key is an NxN integer numpy array; its determinant must be
        coprime with 36 so that a modular inverse (decrypt key) exists."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a symbol of the alphabet to its numeric value (A=0 ... 9=35)."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a numeric value back to its alphabet symbol."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(encrypt_key) is coprime with 36."""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        # math.gcd keeps this block self-contained (stdlib over hand-rolled gcd).
        if math.gcd(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Uppercase, drop non-alphabet chars, and pad with the last char so
        the length is a multiple of break_key."""
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt *text* block-by-block with the key matrix (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        """Build the modular inverse of the key matrix (the decryption key)."""
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # Brute-force the modular inverse of the determinant mod 36.
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        # det_inv * det * inv(K) == adjugate-based modular inverse of K.
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt *text* block-by-block; returns the padded plaintext."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
def main() -> None:
    """Interactive CLI: read a key matrix, then encrypt or decrypt user text."""
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
    # Run the module's doctests first, then start the interactive cipher CLI.
    import doctest
    doctest.testmod()
    main()
| 94 | 1 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # Train an LSTM on a univariate series and predict `forward_days` ahead.
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10  # input window length
    forward_days = 5  # prediction horizon
    periods = 20  # number of look_back windows reserved for testing

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # Slide a window over the series: X = look_back points, y = next forward_days.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
| 27 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    """Builds tiny LayoutLMv3 configs and inputs for the TF model tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the TF LayoutLMv3 architectures."""

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests are skipped for LayoutLMv3 (they need a processor, not a tokenizer).
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(__a )
def prepare_img():
    """Load the COCO fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the pretrained microsoft/layoutlmv3-base checkpoint."""

    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 27 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    """Builds tiny Blenderbot configs and inputs for the TF model tests."""

    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        # Make sure every row ends with the EOS token.
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build a full Blenderbot input dict, deriving any mask not supplied by the caller.

    The original def repeated ``lowercase__`` for every parameter (a SyntaxError) while
    the body read the real names; names restored from the body and from the call site
    ``prepare_blenderbot_inputs_dict(...)`` earlier in this file.
    """
    if attention_mask is None:
        # pad positions are masked out
        # NOTE(review): ``tf.inta`` looks mangled (presumably tf.int8); kept for file consistency.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.inta)
    if decoder_attention_mask is None:
        # the first decoder token (decoder_start_token) is always attended to
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.inta),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class lowerCAmelCase__ ( TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """Common model/pipeline test suite for TF Blenderbot.

    The original bound every class attribute to the single name ``lowerCamelCase__`` (so
    later assignments shadowed earlier ones) and named every method ``A_``; attribute and
    method names are restored per the ModelTesterMixin contract.
    NOTE(review): the base classes and ``BlenderbotConfig`` are taken from the upstream
    test module — confirm against this file's (not visible) import block.
    """

    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: generate with the distilled 400M Blenderbot checkpoint.

    Attribute names restored from the in-body reads (``self.src_text`` / ``self.model_name``)
    and property names from ``self.tokenizer`` / ``self.model``; the original bound them all
    to one obfuscated name, so only the last binding survived.
    """

    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        # NOTE(review): class name looks digit-mangled (Seq2Seq -> Seqa); kept for file consistency.
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors='tf')
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
# Locate the repo's ``src`` directory and put it on sys.path so that the in-tree
# ``transformers`` is importable. The original assigned to a throwaway name while the
# next line read ``git_repo_path``; the binding is restored.
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
# Tiny wav2vec2 checkpoints used to keep the DeepSpeed smoke tests fast.
# Names restored: the test class below reads ``models[model]`` and the parameterization
# reads ``stages``/``models.keys()``, but the original assigned everything to one
# obfuscated name (and built ``stages`` from an undefined ``ZEROa``).
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"
ZERO3 = "zero3"
# all DeepSpeed ZeRO stages exercised by the parameterized tests
stages = [ZERO2, ZERO3]
def _snake_case(func, param_num, param):
    """Generate sub-test names of the form ``<test_name>_<stage>_<model>``.

    We customize the test name generator function as we want both params to appear in
    the sub-test name, as by default it shows only the first param. ``param_num`` is
    required by parameterized's ``name_func`` signature but unused here.

    Fixes over the original: the join used ``str(lowercase__)`` instead of ``str(x)``,
    and the result was bound to a throwaway name while the f-string read
    ``param_based_name``.
    """
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
# NOTE(review): the target name is obfuscated — this is the ``params`` list consumed by
# ``@parameterized.expand`` in the test class below; confirm the original name.
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__ ( lowercase ):
    """Smoke tests: launch the wav2vec2 research example ``run_asr.py`` under DeepSpeed
    ZeRO-2/ZeRO-3, in fp16/fp32 and single/multi-GPU variants, and check the subprocess
    does not fail.

    NOTE(review): identifiers here are machine-obfuscated — every test method is named
    ``A_`` (later defs shadow earlier ones), several signatures repeat the parameter name
    ``lowercase`` (a SyntaxError), locals are bound to ``_lowerCamelCase`` while later
    lines read the real names (``output_dir``, ``args``, ``launcher`` …), and the base
    class reference ``lowercase`` is unresolved. The original names must be restored
    before this suite can run; only documentation is added here.
    """

    @parameterized.expand(lowercase , name_func=lowercase )
    def A_ ( self , lowercase , lowercase ):
        # fp16, non-distributed variant (presumably — the flag values are obfuscated; verify)
        self.run_and_check(
            stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )

    @require_torch_multi_gpu
    @parameterized.expand(lowercase , name_func=lowercase )
    def A_ ( self , lowercase , lowercase ):
        # distributed (multi-GPU) variant
        self.run_and_check(
            stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )

    @parameterized.expand(lowercase , name_func=lowercase )
    def A_ ( self , lowercase , lowercase ):
        # fp32 variant
        self.run_and_check(
            stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )

    @require_torch_multi_gpu
    @parameterized.expand(lowercase , name_func=lowercase )
    def A_ ( self , lowercase , lowercase ):
        # fp32 distributed variant
        self.run_and_check(
            stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )

    def A_ ( self , lowercase ):
        # do_checks(output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ):
        # run_and_check(stage, model, eval_steps=10, distributed=True, fp16=True):
        # resolve the checkpoint, run the trainer once, then validate the output dir.
        _lowerCamelCase : List[str] = models[model]
        _lowerCamelCase : Optional[int] = self.run_trainer(
            stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
        self.do_checks(lowercase )
        return output_dir

    def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ):
        # run_trainer: assemble the run_asr.py CLI and execute it under the deepspeed launcher.
        _lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
        _lowerCamelCase : Any = F'''
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(lowercase )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        '''.split()
        if fpaa:
            args.extend(['--fp16'] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        _lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        _lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        _lowerCamelCase : Dict = self.get_launcher(lowercase )
        _lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(lowercase , env=self.get_env() )
        return output_dir

    def A_ ( self , lowercase=False ):
        # get_launcher(distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        _lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1
        return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowercase__ = logging.get_logger(__name__)
class __snake_case ( OwlViTImageProcessor ):
    """Deprecated alias of ``OwlViTImageProcessor`` kept for backward compatibility.

    Fixes over the original: ``*lowercase, **lowercase`` duplicated a parameter name
    (a SyntaxError), the warning category argument was the broken ``lowercase`` token,
    and the base class reference was the undefined ``__lowerCAmelCase`` (restored to the
    imported ``OwlViTImageProcessor``).
    """

    def __init__(self, *args, **kwargs) -> None:
        # Emit a deprecation warning, then defer entirely to the image processor.
        warnings.warn(
            'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use OwlViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase__ = logging.getLogger(__name__)
class __snake_case :
    """Ray remote-actor wrapper that lazily constructs a ``RagRetriever`` on its worker.

    Method names restored from the remote call sites in the distributed retriever below
    (``worker.create_rag_retriever.remote`` / ``worker.init_retrieval.remote`` /
    ``random_worker.retrieve.remote``) — the original named all three ``lowerCamelCase_``,
    so only the last definition survived.
    """

    def __init__(self):
        # the actual retriever is created on first use via ``create_rag_retriever``
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Build the retriever once per actor; subsequent calls are no-ops."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        """Load the retrieval index on this worker."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Run retrieval locally and return ``(doc_ids, retrieved_doc_embeds)``."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class __snake_case ( RagRetriever ):
    """A RAG retriever distributed across Ray actors.

    Retrieval is delegated to a randomly chosen worker actor when any are available,
    otherwise it runs in-process. Fixes over the original: the base class was the
    undefined ``__lowerCAmelCase`` (restored to the imported ``RagRetriever``), the three
    instance methods all shared one obfuscated name, several lines used the invalid
    ``a, a : T = ...`` annotated tuple assignment, and locals were bound to throwaway
    names while later lines read the real ones.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        """Initialize the index on every worker actor, or locally when none exist."""
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve ``n_docs`` documents, remotely when workers are available."""
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super().get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Construct the distributed retriever, building a custom index when a dataset is given."""
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase ( ModelTesterMixin, UNetTesterMixin, unittest.TestCase ):
    """Unit tests for ``VQModel`` (VAE with vector-quantized latents).

    Identifier fixes over the original: bases were the undefined ``__a`` (restored to the
    mixins imported above), class attributes were both assigned to ``_A`` while the mixin
    reads ``model_class``/``main_input_name``, locals were bound to ``__lowercase`` while
    later lines read the real names, and one line used the invalid
    ``a, a : T = ...`` annotated tuple assignment.
    """

    model_class = VQModel
    main_input_name = 'sample'

    @property
    def dummy_input(self, sizes=(32, 32)):
        """A single random batch shaped ``(4, 3, *sizes)`` on the test device."""
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Return a tiny VQModel config plus matching dummy inputs."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # not applicable to VQModel
        pass

    def test_training(self):
        # not applicable to VQModel
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        # seed both CPU and (if present) CUDA so the expected slice is reproducible
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1E-3))
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace the last ``occurrence`` occurrences of ``old`` in ``s`` with ``new``.

    Name restored from the call sites in ``upgrade_state_dict`` below; the original def
    repeated one parameter name four times (a SyntaxError) while the body read ``new``.
    """
    parts = s.rsplit(old, occurrence)
    return new.join(parts)
def count_parameters(state_dict):
    """Sum all parameter values in ``state_dict`` as a float tensor.

    Name restored from the call sites in the conversion function below; the parameter is
    renamed to match the body's read of ``state_dict``.
    """
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    """Rename dall-e ``Encoder`` keys to the FlavaImageCodebook layout, casting values to float32.

    Name restored from the call site in the conversion function below. The original never
    populated (or even defined) the ``upgrade`` dict it returned and read ``state_dict``
    despite binding the parameter to an obfuscated name.
    """
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        # nest each resnet group under a ``.group.`` scope
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F"{group_key}.", F"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        # expand the dall-e short suffixes to PyTorch conventions (last occurrence only)
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Load a dall-e ``Encoder`` checkpoint and convert it into a ``FlavaImageCodebook``.

    ``checkpoint_path`` may be a local file or a URL. When ``save_checkpoint`` is True the
    converted model is written to ``pytorch_dump_folder_path``; otherwise the converted
    state dict is returned. Name restored from the ``__main__`` call below; the original
    def repeated one parameter name (a SyntaxError) while the body read the real names.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)  # local file
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)  # remote URL

    # some checkpoints are pickled whole modules rather than state dicts
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    # parameter sums must match for the conversion to be considered faithful
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    # CLI entry point: parse the paths and run the conversion.
    # The original assigned the parser and parsed args to a throwaway obfuscated name
    # while the following lines read ``parser`` / ``args``.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case_:
    """Builds tiny Swin V2 configs and inputs for the model tests below (an obfuscated
    ``Swinv2ModelTester``; ``Swinva*`` names appear digit-mangled from ``Swinv2*``).

    NOTE(review): this block is machine-obfuscated — the ``__init__`` signature repeats
    one parameter name (a SyntaxError), every assignment targets the throwaway name
    ``lowerCAmelCase`` instead of ``self.<attr>``/a real local, and some lines use the
    invalid ``a, a : T = ...`` annotated-tuple form. Original identifiers must be
    restored before this can run; only documentation is added here.
    """

    def __init__( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Union[str, Any]=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : int=3 , UpperCamelCase_ : Any=1_6 , UpperCamelCase_ : int=[1, 2, 1] , UpperCamelCase_ : Optional[int]=[2, 2, 4] , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Any=2.0 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Tuple="gelu" , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[Any]=0.02 , UpperCamelCase_ : Tuple=1E-5 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : List[Any]=1_0 , UpperCamelCase_ : Dict=8 , ):
        # stores the test harness plus every Swin V2 hyper-parameter used by get_config()
        lowerCAmelCase : Union[str, Any] = parent
        lowerCAmelCase : int = batch_size
        lowerCAmelCase : List[str] = image_size
        lowerCAmelCase : Union[str, Any] = patch_size
        lowerCAmelCase : int = num_channels
        lowerCAmelCase : Any = embed_dim
        lowerCAmelCase : Any = depths
        lowerCAmelCase : Any = num_heads
        lowerCAmelCase : int = window_size
        lowerCAmelCase : List[Any] = mlp_ratio
        lowerCAmelCase : int = qkv_bias
        lowerCAmelCase : Optional[Any] = hidden_dropout_prob
        lowerCAmelCase : str = attention_probs_dropout_prob
        lowerCAmelCase : str = drop_path_rate
        lowerCAmelCase : Union[str, Any] = hidden_act
        lowerCAmelCase : int = use_absolute_embeddings
        lowerCAmelCase : Union[str, Any] = patch_norm
        lowerCAmelCase : int = layer_norm_eps
        lowerCAmelCase : str = initializer_range
        lowerCAmelCase : Optional[int] = is_training
        lowerCAmelCase : int = scope
        lowerCAmelCase : List[str] = use_labels
        lowerCAmelCase : str = type_sequence_label_size
        lowerCAmelCase : Union[str, Any] = encoder_stride

    def lowerCamelCase__ ( self : Any ):
        # prepare_config_and_inputs: random pixel values (+ labels when enabled) and a config
        lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase : Union[str, Any] = None
        if self.use_labels:
            lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCAmelCase : Tuple = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase__ ( self : List[Any] ):
        # get_config: a tiny SwinvaConfig built from the stored hyper-parameters
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Dict ):
        # create_and_check_model: the base model's last hidden state must have the
        # expected (downsampled) sequence length and doubled embedding width
        lowerCAmelCase : List[str] = SwinvaModel(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowerCAmelCase : List[str] = model(UpperCamelCase_ )
        lowerCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowerCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] ):
        # create_and_check_for_masked_image_modeling: logits reconstruct the full image
        lowerCAmelCase : Tuple = SwinvaForMaskedImageModeling(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowerCAmelCase : Dict = model(UpperCamelCase_ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        lowerCAmelCase : List[Any] = 1
        lowerCAmelCase : List[str] = SwinvaForMaskedImageModeling(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowerCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCAmelCase : int = model(UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : int ):
        # create_and_check_for_image_classification: one logit per label class
        lowerCAmelCase : List[str] = self.type_sequence_label_size
        lowerCAmelCase : Optional[Any] = SwinvaForImageClassification(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , labels=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def lowerCamelCase__ ( self : str ):
        # prepare_config_and_inputs_for_common: repack as (config, {"pixel_values": ...})
        lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
        lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : str = config_and_inputs
        lowerCAmelCase : Dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class snake_case_( a__ , a__ , unittest.TestCase ):
__UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__UpperCamelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Dict = SwinvaModelTester(self )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase_ , embed_dim=3_7 )
def lowerCamelCase__ ( self : Optional[int] ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def lowerCamelCase__ ( self : Dict ):
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def lowerCamelCase__ ( self : int ):
pass
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase, lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
lowerCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[Any] = True
for model_class in self.all_model_classes:
lowerCAmelCase : Any = True
lowerCAmelCase : List[str] = False
lowerCAmelCase : int = True
lowerCAmelCase : int = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : str = outputs.attentions
lowerCAmelCase : int = len(self.model_tester.depths )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase : Any = True
lowerCAmelCase : Union[str, Any] = config.window_size**2
lowerCAmelCase : int = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : Dict = outputs.attentions
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCAmelCase : str = len(UpperCamelCase_ )
# Check attention is always last and order is fine
lowerCAmelCase : Optional[int] = True
lowerCAmelCase : int = True
lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Tuple = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
lowerCAmelCase : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase : Union[str, Any] = 2
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase_ ) )
lowerCAmelCase : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : int = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : str = outputs.hidden_states
lowerCAmelCase : List[str] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# Swinv2 has a different seq_length
lowerCAmelCase : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCAmelCase : List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : str = reshaped_hidden_states[0].shape
lowerCAmelCase : Optional[Any] = (
reshaped_hidden_states[0].view(UpperCamelCase_ , UpperCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : Tuple = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Dict = 3
lowerCAmelCase : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase : str = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : Optional[int] = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase_ )
def lowerCamelCase__( self : str ):
    """Run the tester's image-classification check (fixes the unbound result of the original)."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def lowerCamelCase__( self : int ):
    """Smoke-test loading the first pretrained Swinv2 checkpoint from the hub."""
    for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        # The original passed an undefined name instead of the loop variable.
        model = SwinvaModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
def lowerCamelCase__( self : Optional[int] ):
    """Verify non-embedding parameters are exactly 0 or 1 under zeroed initializer config."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    configs_no_init = _config_zero_init(config)
    for model_class in self.all_model_classes:
        model = model_class(config=configs_no_init)
        for name, param in model.named_parameters():
            if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                # Round to 9 decimals to tolerate float noise around 0.0 / 1.0.
                self.assertIn(
                    ((param.data.mean() * 1e9).round() / 1e9).item(),
                    [0.0, 1.0],
                    msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                )
@require_vision
@require_torch
class snake_case_( unittest.TestCase ):
    """Integration test running the pretrained Swinv2-tiny checkpoint end to end.

    The obfuscated original gave both members the same name (the second shadowed
    the first) while the test body read ``self.default_image_processor``; the
    canonical names are restored so unittest can discover the test.
    """

    @cached_property
    def default_image_processor( self ):
        # Only available when the vision extras are installed.
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head( self ):
        """Forward a COCO sample through the classifier and pin the expected logits."""
        # NOTE(review): ``torch_device`` is expected to come from
        # transformers.testing_utils — confirm it is imported at the top of the file.
        model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3_947, -0.4_306, 0.0_026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 60 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_( a__ ):
    """Unconditional latent-diffusion pipeline: DDIM denoising in VQ-VAE latent space.

    NOTE(review): the base name ``a__`` is obfuscated; upstream this derives from
    the imported DiffusionPipeline — confirm against the import list.
    """

    def __init__( self : int, vqvae: VQModel, unet: UNetaDModel, scheduler: DDIMScheduler ):
        # The obfuscated original repeated one parameter name, which is a
        # SyntaxError; the upstream names are restored.
        super().__init__()
        # register_modules exposes the submodules as self.vqvae / self.unet /
        # self.scheduler and includes them in save/load.
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self : Union[str, Any],
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 5_0,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs : Optional[int],
    ):
        """Generate ``batch_size`` images by iterative denoising, then decode with the VQ-VAE.

        Returns an ImagePipelineOutput, or a plain ``(images,)`` tuple when
        ``return_dict`` is False.
        """
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 60 | 1 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __lowerCamelCase ( unittest.TestCase ):
    """Tests for ClapProcessor: save/load round-trips and tokenizer/feature-extractor delegation.

    The obfuscated original named every method ``a`` (so they shadowed each
    other and unittest discovered nothing) while the bodies called
    ``self.get_tokenizer`` / ``self.get_feature_extractor``; the canonical
    unittest and helper names are restored.
    """

    def setUp( self ):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer( self, **kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor( self, **kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        # from_pretrained yields the fast tokenizer variant.
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features( self ):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1_000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        # The first two names come from the tokenizer, the rest from the feature extractor.
        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 221 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
UpperCamelCase = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
UpperCamelCase = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
UpperCamelCase = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
UpperCamelCase = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
UpperCamelCase = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __lowerCamelCase ( UpperCamelCase__ ):
    """Fast (Rust-backed) tokenizer for the DPR context encoder; BERT-style.

    NOTE(review): the obfuscated base name should be ``BertTokenizerFast``
    (imported above) — confirm.
    """

    # The obfuscated original assigned everything to one name (`snake_case__`),
    # overwriting each attribute; the PreTrainedTokenizerFast contract names
    # are restored so the base class actually reads them.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class __lowerCamelCase ( UpperCamelCase__ ):
    """Fast (Rust-backed) tokenizer for the DPR question encoder; BERT-style.

    NOTE(review): the obfuscated base name should be ``BertTokenizerFast``
    (imported above) — confirm.
    """

    # Restored PreTrainedTokenizerFast contract attribute names (the original
    # obfuscation overwrote a single `snake_case__` name five times).
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
# Structured result types for DPR reader post-processing. The obfuscated
# original bound both tuples to the same name (the second overwrote the
# first) even though later code references `DPRSpanPrediction` directly.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCamelCase = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(UpperCamelCase__ )
class __lowerCamelCase :
    """Mixin adding DPR-reader-specific encoding and answer-span decoding to a tokenizer.

    NOTE(review): the decorator argument name is obfuscated; upstream it is the
    module-level reader docstring constant — confirm.
    """

    def __call__(
        self : Tuple,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs : List[Any],
    ) -> BatchEncoding:
        """Encode question(+title+text) triples as `[CLS] q [SEP] title [SEP] text` rows.

        The obfuscated original repeated one parameter name (a SyntaxError);
        the upstream parameter names are restored.
        """
        # With no passages, behave exactly like the plain tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts given: encode it as the pair sequence.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # One question may be paired with many passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self : Dict,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Return up to ``num_spans`` best answer spans across passages, best-scored first."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        # Visit the most relevant passages first.
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
                if len(nbest_spans_predictions) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self : int,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Score every (start, end) span up to ``max_answer_length`` and keep the top
        non-overlapping ``top_spans`` intervals."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip spans that overlap an already chosen (higher-scoring) span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(UpperCamelCase__ )
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
    """Fast DPR reader tokenizer: the span-decoding mixin on top of a BERT fast tokenizer.

    NOTE(review): the obfuscated base names should be the reader mixin above and
    ``BertTokenizerFast`` — confirm.
    """

    # Restored PreTrainedTokenizerFast contract attribute names (the original
    # obfuscation overwrote a single `snake_case__` name repeatedly).
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 221 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the Autoformer package. The obfuscated original
# rebound a single `__A` name and then referenced the never-defined
# `_import_structure`, so the lazy module was never built.
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only expose the torch models when torch is actually installed.
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 10 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration class holding the architecture hyper-parameters of a UniSpeech model.

    The obfuscated original repeated parameter names (a SyntaxError) and lost
    every ``self.x = ...`` target; the upstream names are restored. The base
    class is the imported (and otherwise unused) ``PretrainedConfig``.
    """

    # Registry key used by AutoConfig.
    model_type = "unispeech"

    def __init__(
        self : Any,
        vocab_size: int = 32,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        attention_dropout: float = 0.1,
        feat_proj_dropout: float = 0.0,
        feat_quantizer_dropout: float = 0.0,
        final_dropout: float = 0.1,
        layerdrop: float = 0.1,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1E-5,
        feat_extract_norm: str = "group",
        feat_extract_activation: str = "gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias: bool = False,
        num_conv_pos_embeddings: int = 128,
        num_conv_pos_embedding_groups: int = 16,
        do_stable_layer_norm: bool = False,
        apply_spec_augment: bool = True,
        mask_time_prob: float = 0.05,
        mask_time_length: int = 10,
        mask_time_min_masks: int = 2,
        mask_feature_prob: float = 0.0,
        mask_feature_length: int = 10,
        mask_feature_min_masks: int = 0,
        num_codevectors_per_group: int = 320,
        num_codevector_groups: int = 2,
        contrastive_logits_temperature: float = 0.1,
        num_negatives: int = 100,
        codevector_dim: int = 256,
        proj_codevector_dim: int = 256,
        diversity_loss_weight: float = 0.1,
        ctc_loss_reduction: str = "mean",
        ctc_zero_infinity: bool = False,
        use_weighted_layer_sum: bool = False,
        classifier_proj_size: int = 256,
        num_ctc_classes: int = 80,
        pad_token_id: int = 0,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        replace_prob: float = 0.5,
        **kwargs : Optional[int],
    ) ->str:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self : List[Any]) ->Optional[Any]:
        # Total downsampling factor of the convolutional feature encoder.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 10 | 1 |
def A_ ( snake_case_ : list[int] ):
    """Return the maximum product of any contiguous subarray of *snake_case_*.

    Classic Kadane-style scan keeping both the running maximum and minimum
    product (a negative number turns the minimum into the maximum).
    Returns 0 for an empty input; raises ValueError for non-integer content.
    The obfuscated original never bound the running values (NameError) and
    lost the min/max swap on negatives.
    """
    if not snake_case_:
        return 0
    if not isinstance(snake_case_, (list, tuple)) or not all(
        isinstance(number, int) for number in snake_case_
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = snake_case_[0]
    for i in range(1, len(snake_case_)):
        # update the maximum and minimum subarray products
        number = snake_case_[i]
        if number < 0:
            # A negative factor swaps the roles of the running extremes.
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
| 361 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[int] = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class lowerCamelCase ( PretrainedConfig ):
    """Configuration for an MVP encoder-decoder model.

    Stores the architecture hyper-parameters and forwards the special-token /
    generation settings to :class:`PretrainedConfig`.

    Restored from an obfuscated block: the base class was the undefined name
    `_UpperCAmelCase`, all three class attributes were bound to one name
    (`lowercase`), and every ``__init__`` parameter shared the name
    ``SCREAMING_SNAKE_CASE_`` (a SyntaxError). Names below follow the
    attribute assignments in the original body.
    """

    model_type = 'mvp'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=5_0267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy escape hatch: honour the deprecated
        # `force_bos_token_to_be_generated` kwarg by forcing BOS at generation time.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                """The config can simply be saved and uploaded again to be fixed.""" )
| 27 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
_A = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_A = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_A = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    """Fraction of predictions exactly matching the labels (as a float).

    Restored under the name the Metric class in this module calls; the
    obfuscated original bound all three helpers to one name, so this
    (shadowed) definition was dead code.
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return plain accuracy together with the F1 score.

    Restored under the name the Metric class in this module calls. Accuracy is
    computed inline (the original called an undefined helper name).
    """
    acc = float((preds == labels).mean())
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds, labels):
    """Return Pearson and Spearman rank correlation between preds and labels.

    Restored under the name the Metric class in this module calls (used for
    the STS-B regression subset).
    """
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase__ ( datasets.Metric ):
    """GLUE metric: validates the requested subset name and dispatches the
    compute call to the matching scoring function.

    Restored from an obfuscated block in which both methods shared the name
    ``_a`` and the compute method repeated the parameter name ``A_`` (a
    SyntaxError). Method names follow the `datasets.Metric` API
    (`_info` / `_compute`).
    """

    def _info(self) -> datasets.MetricInfo:
        # Reject unknown GLUE subset names up front with the canonical list.
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
        # "stsb" is a regression task, so predictions/references are floats.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                } ),
            codebase_urls=[],
            reference_urls=[],
            format='numpy',
        )

    def _compute(self, predictions, references):
        # Dispatch on the GLUE subset: correlation for CoLA/STS-B,
        # accuracy(+F1) for the classification subsets.
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 62 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the canonical YOLOS checkpoint -> config-file map.
# NOTE(review): the second statement rebinds `_A`, clobbering the logger —
# verify the intended distinct names (logger vs. pretrained config map).
_A = logging.get_logger(__name__)

_A = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """Configuration for a YOLOS (ViT-based object detection) model.

    Restored from an obfuscated block whose base class was the undefined name
    `A_` and whose ``__init__`` repeated the parameter name ``A_`` (a
    SyntaxError). Parameter names follow the attribute assignments in the
    original body.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class UpperCAmelCase__ ( OnnxConfig ):
    """ONNX export configuration for YOLOS.

    Restored from an obfuscated block: the base class was the undefined name
    `A_`, the class attribute was misnamed, and all three properties shared
    the name ``_a`` so only the last survived; names follow the standard
    `OnnxConfig` hooks.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel-values input with symbolic batch/channel/spatial axes.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model's outputs.
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 62 | 1 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    """Builds a tiny XLM configuration plus random inputs and provides
    `create_and_check_*` helpers asserting output shapes for each task head.

    Restored from an obfuscated block whose ``__init__`` repeated the
    parameter name ``__a`` (a SyntaxError) and whose methods all shared the
    name ``snake_case_``; identifiers follow the attribute assignments and the
    call sites in the test class below (which references ``XLMModelTester``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=2,
        num_choices=4,
        summary_type="last",
        use_proj=True,
        scope=None,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels sized by the tester's hyper-parameters."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            num_labels=self.num_labels,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_xlm_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_xlm_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_xlm_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_xlm_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        # With labels the beam-search QA head collapses to a single loss output.
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_xlm_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_xlm_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_xlm_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate every input along a new "choices" axis.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Shape/config tests for all XLM task heads.

    Restored from an obfuscated block: the mixin bases were the undefined name
    `_lowercase`, all class attributes were bound to one name, and every
    method shared the name ``snake_case_`` (with duplicate ``__a`` parameters,
    a SyntaxError). Names follow the common HF model-test conventions and the
    helpers this class calls on ``XLMModelTester``.
    """

    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # The beam-search QA head additionally needs start/end positions.
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1

            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )

    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation with a pretrained checkpoint."""

    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 4_47]], dtype=torch.long, device=torch_device)  # the president
        # Greedy decoding keeps repeating "the president".
        expected_output_ids = [
            14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47,
            14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 244 |
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    """Reads a `datasets.Dataset` from a SQL query or table via the `Sql` builder.

    Restored from an obfuscated block whose ``__init__`` repeated the
    parameter name ``__a`` (a SyntaxError) and whose base class was the
    undefined name `_lowercase`.
    """

    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Materialise the query/table into a Dataset ('train' split)."""
        # SQL reads have nothing to download, but the builder API requires
        # these arguments.
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    """Writes a `datasets.Dataset` to a SQL table, batch by batch, optionally
    with a multiprocessing pool.

    Restored from an obfuscated block: the constructor repeated the parameter
    name ``__a`` (a SyntaxError), all methods shared one name, and the body
    called ``self._batch_sql`` which was never defined under that name.
    """

    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        """Write the whole dataset; returns the number of rows written."""
        # `sql`/`con` are fixed by the constructor — drop user duplicates.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one batch; `args` is (offset, index, to_sql_kwargs)."""
        offset, index, to_sql_kwargs = args
        # Only the first batch may create/replace the table; later batches append.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Sequential or pooled write loop; returns total rows written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
| 244 | 1 |
def combination_sum_iv(n, array, target):
    """Count ordered sequences drawn from `array` (with repetition) that sum
    to `target`, via plain exponential recursion.

    `n` (== len(array)) is unused here but kept for a uniform signature with
    the DP variants below. Restored from an obfuscated block whose parameters
    all shared one name (a SyntaxError); `combination_sum_iv` is the name the
    module's `__main__` block calls.
    """

    def count_of_possible_combinations(target):
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n, array, target):
    """Top-down (memoised) variant of `combination_sum_iv`.

    `dp_array[t]` caches the number of ordered combinations summing to `t`
    (-1 = not computed yet). `n` is unused but kept for a uniform signature.
    """

    def count_of_possible_combinations_with_dp_array(target, dp_array):
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n, array, target):
    """Bottom-up DP variant: O(n * target) time, O(target) extra space.

    `n` must equal len(array); `dp_array[t]` counts ordered combinations
    summing to `t`.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to reach 0: the empty sequence
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Example: ordered ways to write 5 as sums drawn from {1, 2, 5} -> 9.
    # The original block bound all three values to one obfuscated name,
    # leaving `n`/`target`/`array` undefined at the call.
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 186 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    """Resize images so the shorter edge matches a randomly chosen target
    length (bounded by `max_size`), preserving aspect ratio.

    Restored from an obfuscated block with duplicate parameter names (a
    SyntaxError) and undefined body names; the class name is what the
    preprocessing class below instantiates.
    """

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """`short_edge_length` is a [lo, hi] range sampled per call."""
        self.interp_method = '''bilinear'''
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            # Clamp the longer edge to max_size, rescaling both dimensions.
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                # uint8 ndarray -> resize via PIL and convert back.
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                # float tensor -> resize via torch interpolate.
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    """Turns raw images into normalized, padded, batched tensors plus their
    original sizes and y/x rescale factors, driven by a detectron-style cfg.

    Restored from an obfuscated block: `__call__` repeated a parameter name
    (a SyntaxError) and the bodies referenced undefined names (`cfg`,
    `images`, `self.pad`, ...).
    """

    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        """Zero-pad all images to the per-dimension maximum and stack them.

        Returns (stacked batch tensor, tensor of the pre-padding H/W sizes).
        """
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                # Coerce each entry to a float tensor on the target device;
                # non-tensors (paths/arrays) go through img_tensorize first.
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert torch.isfinite(SCREAMING_SNAKE_CASE ).all(), "Box tensor contains infinite or NaN!"
A_ , A_ : int = box_size
tensor[:, 0].clamp_(min=0 , max=SCREAMING_SNAKE_CASE )
tensor[:, 1].clamp_(min=0 , max=SCREAMING_SNAKE_CASE )
tensor[:, 2].clamp_(min=0 , max=SCREAMING_SNAKE_CASE )
tensor[:, 3].clamp_(min=0 , max=SCREAMING_SNAKE_CASE )
| 186 | 1 |
from __future__ import annotations
# Sentinel character marking the end of a stored word inside the trie.
# NOTE(review): the trie class below references `END`, but the obfuscated
# rename left this constant bound to `__lowerCamelCase` -- confirm and restore.
__lowerCamelCase : Optional[int] = """#"""
class A__ :
    """Prefix trie supporting word insertion and prefix completion.

    Repairs the obfuscation damage: assignments to ``self._trie`` and the trie
    cursor had been redirected into throwaway locals, and all three methods
    shared one (mangled) name. Method names are restored to the ones the
    module-level code actually calls (``insert_word`` / ``find_word``).
    """

    # Sentinel key marking a complete word; kept on the class so the block is
    # self-contained (the module-level END constant was renamed by obfuscation).
    _END = "#"

    def __init__( self ):
        # Nested-dict trie: each key is one character, _END marks word ends.
        self._trie: dict = {}

    def insert_word( self , text ):
        """Insert ``text`` into the trie, character by character."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[self._END] = True

    def find_word( self , prefix ):
        """Return all stored completions of ``prefix`` (or ``[]`` if none)."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements( self , d ):
        """Collect every suffix reachable from sub-trie ``d`` as a tuple.

        A completed word contributes a trailing space (the original file's
        behavior for the _END sentinel).
        """
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == self._END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
# Build the demo trie. The obfuscated version instantiated an undefined name
# (`Trie`) and bound the results to throwaway names; the class defined above
# is `A__`, and later code reads `trie` / `words`.
trie = A__()
words = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    """Return every stored word that completes ``string`` (prefix + suffix)."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    """Demo entry point: print completions of 'de'."""
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 140 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import bookkeeping for the encoder-decoder subpackage. The obfuscated
# version bound every structure to the same throwaway name, so the
# `_import_structure` read at the bottom never resolved.
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is
    # replaced by a _LazyModule that imports submodules on first access.
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 140 | 1 |
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the number starts with a known issuer prefix.

    Restores the parameter name the body reads and the function name the
    top-level validator calls (the obfuscated file had three functions all
    named ``UpperCamelCase``).
    """
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
def luhn_validation(credit_card_number: str) -> bool:
    """Return True if ``credit_card_number`` passes the Luhn checksum.

    Restores the local names the body reads (``cc_number``, ``total``, the
    half-length bound) which the obfuscation had collapsed into ``a_``.
    """
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validate a credit card number, printing the reason on failure.

    Checks, in order: digits only, length 13-16, issuer prefix, Luhn checksum.
    Restores the parameter/function names the body and the ``__main__`` block
    actually reference.
    """
    error_message = f"""{credit_card_number} is an invalid credit card number because"""
    if not credit_card_number.isdigit():
        print(f"""{error_message} it has nonnumerical characters.""")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"""{error_message} of its length.""")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"""{error_message} of its first two digits.""")
        return False
    if not luhn_validation(credit_card_number):
        print(f"""{error_message} it fails the Luhn check.""")
        return False
    print(f"""{credit_card_number} is a valid credit card number.""")
    return True
if __name__ == "__main__":
    import doctest

    # Run the module doctests, then demo one valid and one invalid number.
    doctest.testmod()
    validate_credit_card_number('4111111111111111')
    validate_credit_card_number('32323')
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    """Divide-and-conquer maximum subarray over ``arr[low:high + 1]``.

    Returns (start_index, end_index, sum) of the maximum-sum contiguous
    subarray. Fixes the mangled signature (three parameters with one name, a
    SyntaxError) and restores the recursive calls' target name.
    """
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    """Best subarray that crosses ``mid``: scan left from mid, right from mid+1.

    Returns (left_index, right_index, combined_sum). Fixes the mangled
    signature (four identically-named parameters) and the collapsed locals.
    """
    left_sum, max_left = float("-inf"), -1
    right_sum, max_right = float("-inf"), -1
    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray(input_size: int) -> float:
    """Time one ``max_subarray`` run on a random array of ``input_size`` ints.

    Restores the function name the plotting helper calls and the parameter
    name the body reads.
    """
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def plot_runtimes() -> None:
    """Benchmark ``max_subarray`` over growing inputs and plot the runtimes.

    NOTE(review): the obfuscation destroyed this function's original name; it
    is not referenced elsewhere in the visible file, so a descriptive name is
    used.
    """
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken')
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, '\t\t', runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel('Number of Inputs')
    plt.ylabel('Time taken in seconds')
    plt.show()
if __name__ == "__main__":
    # Only the doctests run by default; the plotting benchmark is manual.
    from doctest import testmod

    testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import bookkeeping for the MVP subpackage. The obfuscated version bound
# everything to one throwaway name, so `_import_structure` (read by
# `_LazyModule` below) was never defined.
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for type checkers; a _LazyModule handles runtime access.
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The config class below calls `logger.info(...)`; the obfuscation collapsed
# both module constants into one throwaway name, leaving `logger` undefined.
logger = logging.get_logger(__name__)

# Hub checkpoint -> config URL map (standard transformers naming convention).
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class __lowerCamelCase ( PretrainedConfig ):
    """Configuration for a Transformer-XL model.

    Restores the obfuscation damage: the base class (``PretrainedConfig``,
    imported at the top of this file), all ``self.<attr>`` assignments in
    ``__init__`` (previously redirected into one local), the de-duplicated
    parameter names, and the ``max_position_embeddings`` property name its own
    setter decorator refers to.
    """

    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=267_735,
        cutoffs=[20_000, 40_000, 200_000],  # kept as-is for interface compat; only read
        d_model=1_024,
        d_embed=1_024,
        n_head=16,
        d_head=64,
        d_inner=4_096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1_600,
        clamp_len=1_000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        """Build the config; see the class attribute_map for aliased names."""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Tie all adaptive-softmax projections except the first cluster when
        # projection sharing is requested.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Transformer-XL has no fixed context length; -1 signals "unlimited".
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for XLM.

    Restores the obfuscation damage: the mixin base (``TokenizerTesterMixin``,
    imported above), the class attributes the mixin expects
    (``tokenizer_class`` / ``test_rust_tokenizer``), de-duplicated local names
    in ``setUp``, and ``test_*`` method names so pytest discovers them.
    """

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the temp test directory."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        """Round-trip text pair used by the shared tokenizer tests."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE tokenization and token->id conversion on the toy vocab."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Special-token layout for single sequences and pairs."""
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
# The three thin MT5 wrappers only override model_type/config_class on top of
# the corresponding T5 TF models imported above. The obfuscated version gave
# all three classes (and both module constants) the same name, so only the
# last definition survived.
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = """T5Config"""


class TFMTaModel(TFTaModel):
    # MT5 reuses the T5 architecture; only the config differs.
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
| 69 | 0 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave two strings character by character.

    Characters are taken alternately from each string; once the shorter one is
    exhausted, the remainder of the longer string is appended. Fixes the
    mangled signature (duplicate parameter names, a SyntaxError) and restores
    the name the ``__main__`` block calls.
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
    # Demo: interleave 'AB' with 'XYZ' (expected 'AXBYZ').
    # NOTE(review): requires `alternative_string_arrange` to be defined above.
    print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
def lowerCAmelCase__ ( a , b ) ->str:
    """Return the bitwise AND of two non-negative ints as a '0b'-prefixed string.

    Both operands are rendered in binary, zero-padded to equal width, and
    ANDed column by column. Fixes the mangled signature: the original declared
    two parameters with the same name (a SyntaxError) while the body read
    ``a`` and ``b``.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
    # Run the module doctests.
    import doctest

    doctest.testmod()
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
__SCREAMING_SNAKE_CASE : List[Any] = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems(config, items):
    """Mark every test without an explicit integration/unit marker as `unit`.

    Restores the pytest hook name (the obfuscated `_a` was never discovered by
    pytest) and the de-duplicated parameter names.
    """
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    """Register the custom `torchaudio_latest` marker (pytest hook)."""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Redirect every datasets cache path into a per-session temp directory.

    Autouse so no test pollutes (or depends on) the real user cache.
    """
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """Silence datasets progress bars for the whole test session."""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    """Keep tests from pinging the Hub's download counter."""
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """Silence SQLAlchemy 2.0 migration warnings for tests that opt in."""
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 347 |
from collections import defaultdict
def dfs(start: int) -> int:
    """DFS from ``start`` counting subtree sizes for the Even Tree problem.

    Records in the module-level ``cuts`` list every node whose subtree has an
    even number of vertices (its edge to the parent can be removed). Restores
    the name the sibling ``even_tree`` entry point calls and the mangled
    locals (``ret`` / ``visited[start]``).

    Relies on module-level ``tree`` (adjacency lists), ``visited`` and
    ``cuts`` set up under the ``__main__`` guard below.
    """
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree() -> None:
    """Entry point: start the subtree-size DFS from the root (node 1)."""
    dfs(1)
if __name__ == "__main__":
    # Sample instance of the Hackerrank "Even Tree" problem: 10 vertices,
    # 9 edges; the answer printed is the maximum number of removable edges.
    # The obfuscation had bound every one of these globals to `a_`, leaving
    # `tree`, `visited`, `cuts` and `edges` (read below and by dfs) undefined.
    n_nodes, n_edges = 10, 9
    tree = defaultdict(list)
    visited: dict = {}
    cuts: list = []
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 340 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
# Docstring constants consumed by the @add_start_docstrings decorator below.
# The obfuscation bound all three to `UpperCamelCase`, so `_DESCRIPTION` and
# `_KWARGS_DESCRIPTION` were undefined at class-decoration time.
_CITATION = '''\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
'''

_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''

_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - \'id\': id of the question-answer pair as given in the references (see below)
        - \'prediction_text\': list of possible texts for the answer, as a list of strings
        depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - \'id\': id of the question-answer pair (see above),
        - \'answers\': a Dict in the CUAD dataset format
            {
                \'text\': list of possible texts for the answer, as a list of strings
                \'answer_start\': list of start positions for the answer, as a list of ints
            }
        Note that answer_start values are not taken into account to compute the metric.
Returns:
    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)
    \'f1\': The F-score of predicted tokens versus the gold answer
    \'aupr\': Area Under the Precision-Recall curve
    \'prec_at_80_recall\': Precision at 80% recall
    \'prec_at_90_recall\': Precision at 90% recall
Examples:
    >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
    >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
    >>> cuad_metric = datasets.load_metric(\"cuad\")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
    """CUAD metric wrapper around the official evaluation script.

    Restores the `datasets.Metric` API method names (`_info` / `_compute`) --
    the obfuscated version gave both methods the same meaningless name, so the
    metric machinery could never find them.
    """

    def _info(self):
        """Describe the metric: citation, feature schema, and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': {
                        '''id''': datasets.Value('''string''' ),
                        '''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
                    },
                    '''references''': {
                        '''id''': datasets.Value('''string''' ),
                        '''answers''': datasets.features.Sequence(
                            {
                                '''text''': datasets.Value('''string''' ),
                                '''answer_start''': datasets.Value('''int32''' ),
                            } ),
                    },
                } ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )

    def _compute(self, predictions, references):
        """Score `predictions` against `references` with the CUAD evaluator.

        Predictions are re-keyed by id, references are wrapped into the SQuAD-
        style nested structure the official `evaluate` function expects.
        """
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 360 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def SCREAMING_SNAKE_CASE( __lowercase ) -> Dict:
    """Element-wise rectified linear unit.

    Clamps every entry of the input at zero from below, i.e. returns
    max(0, x) per element.

    >>> relu([-1, 0, 5]).tolist()
    [0, 0, 5]
    """
    clamped = np.maximum(0, __lowercase)
    return clamped
if __name__ == "__main__":
    # Demo: negatives are clamped to zero.
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 334 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# The conversion helpers below read `logger`, `MAPPING` and `TOP_LEVEL_KEYS`;
# the obfuscation bound all three to one throwaway name.
logger = logging.get_logger(__name__)

# fairseq state-dict key fragment -> HF module path ('*' = layer index).
MAPPING = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
    '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
    '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
    '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
    '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
    '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
    '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
    '''self_attn.rotary_emb''': '''encoder.embed_positions''',
    '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
    '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
    '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
    '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
    '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
    '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
    '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
    '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
    '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
    '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
    '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
    '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}

# HF attributes that live at the model root (no architecture prefix).
TOP_LEVEL_KEYS = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF module attribute addressed by dotted `key`.

    Fixes the mangled signature (five parameters sharing one name, a
    SyntaxError) and restores the name the loader below calls. Walks `key`
    attribute by attribute from `hf_pointer`, validates the shape, then writes
    into the sub-tensor selected by `weight_type` (or the pointer itself).
    """
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute)

    # Shape check against the incoming fairseq tensor before assignment.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Map every fairseq state-dict tensor onto the HF conformer model.

    Conv feature-extractor tensors go through `load_conv_layer`; everything
    else is matched against `MAPPING` and assigned via `set_recursively`.
    Unmatched tensors are collected and logged. Fixes the mangled signature
    and the collapsed locals of the obfuscated version.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # NOTE(review): attribute name follows this file's (obfuscated) model
    # classes -- confirm against the actual model definition.
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    # Decide which sub-tensor of the target module to write.
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "inv_freq" in name:
                        weight_type = '''inv_freq'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
            if not is_used:
                unused_weights.append(name)

    logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Assign one conv feature-extractor tensor from its fairseq name.

    The fairseq name encodes `<layer_id>.<type_id>.<param>`; type 0 is the
    convolution itself, type 2 the (group/layer) norm. Anything else is
    recorded as unused. Fixes the mangled signature and collapsed locals.
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq conformer checkpoint to the HF format on disk.

    Restores the name the `__main__` block calls, de-duplicates the mangled
    parameter names, and re-binds the collapsed locals. For fine-tuned (CTC)
    checkpoints a tokenizer/feature-extractor processor is also written.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act='''swish''' )
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = '''rotary'''

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path) )
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, '''w''', encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1])} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point. The obfuscated version bound the parser and parsed args
    # to a throwaway name, so `parser` and `args` below were undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 104 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# TorchDynamo backend names offered by the interactive config menu; a numeric
# menu choice indexes into this list.
# NOTE(review): the dynamo converter below reads `DYNAMO_BACKENDS`, which looks
# like this constant's pre-renaming name — confirm.
# (annotation changed from the unimported `Any` to the accurate builtin `list`)
UpperCAmelCase : list = [
    'EAGER',
    'AOT_EAGER',
    'INDUCTOR',
    'NVFUSER',
    'AOT_NVFUSER',
    'AOT_CUDAGRAPHS',
    'OFI',
    'FX2TRT',
    'ONNXRT',
    'IPEX',
]
def a__ ( input_text , convert_value=None , default=None , error_message=None ):
    """Prompt on stdin until the answer converts cleanly, then return it.

    Args:
        input_text: prompt shown to the user.
        convert_value: optional callable applied to the raw answer; if it
            raises, the prompt is repeated.
        default: value returned when the user just presses enter.
        error_message: optional message printed when conversion fails.

    The original signature repeated one mangled parameter name (a SyntaxError);
    the names used inside the body (`default`, `convert_value`,
    `error_message`) identify the intended parameters.
    """
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def a__ ( input_text , options=None , convert_value=None , default_choice=0 ):
    """Show a bullet menu and return the (optionally converted) selection.

    Args:
        input_text: menu title.
        options: menu entries (defaults to an empty list; a ``None`` sentinel
            replaces the original shared mutable ``[]`` default).
        convert_value: optional callable applied to the selection.
        default_choice: index pre-selected in the menu.
    """
    menu = BulletMenu(input_text, [] if options is None else options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def a__ ( a__ ):
    """Map a numeric menu choice to a ``ComputeEnvironment`` member.

    The original body read `value`, which was never defined (the assignment
    target had been machine-renamed); restored here.
    """
    value = int(a__)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def a__ ( a__ ):
    """Map a numeric menu choice to a ``DistributedType`` member.

    Restores the `value` local that the original body read but never defined.
    """
    value = int(a__)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def a__ ( a__ ):
    """Map a numeric menu choice to a ``DynamoBackend`` value.

    Two fixes: the `value` local the body read was never defined, and the body
    referenced `DYNAMO_BACKENDS`, which does not exist in this module — the
    backend list defined above was machine-renamed to `UpperCAmelCase`.
    """
    value = int(a__)
    return DynamoBackend(UpperCAmelCase[value]).value
def a__ ( a__ ):
    """Map a numeric menu choice to a ``PrecisionType`` member.

    Restores the `value` local that the original body read but never defined.
    """
    value = int(a__)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def a__ ( a__ ):
    """Map a numeric menu choice to a ``SageMakerDistributedType`` member.

    Restores the `value` local that the original body read but never defined.
    """
    value = int(a__)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def a__ ( a__ ):
    """Convert a case-insensitive "yes"/"no" answer to a bool.

    Raises KeyError for any other answer.  The original body read `value`,
    which was never defined (the parameter had been machine-renamed).
    """
    return {"yes": True, "no": False}[a__.lower()]
class lowerCAmelCase__ ( argparse.RawDescriptionHelpFormatter ):
    """Help formatter that hides the internal ``<command> [<args>] `` placeholder
    from rendered usage lines.

    The original method repeated one mangled parameter name four times (a
    SyntaxError) and read `usage`, which was never bound.  The method is named
    ``_format_usage`` so it actually overrides the argparse hook it delegates to.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        # Let argparse build the usage string first, then strip the
        # sub-command placeholder.
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 267 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase :
    """Builds SwiftFormer configs/inputs and runs shape checks for the tests below.

    The original was machine-mangled: ``__init__`` repeated one parameter name
    (a SyntaxError), every attribute assignment bound a throwaway local instead
    of ``self``, and all five methods shared one name so only the last
    survived.  Method names are restored from the call sites elsewhere in this
    file (``self.get_config()``, ``self.model_tester.prepare_config_and_inputs()``,
    ``create_and_check_model``, ``create_and_check_for_image_classification``,
    ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one test batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Return a small SwiftFormer config driven by this tester's fields."""
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act='gelu',
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last hidden state shape."""
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classifier logits shape, with and without labels."""
        num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        # also run without labels on fresh inputs
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixin expects."""
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
    """Common-behaviour tests for SwiftFormer (config, signatures, hidden
    states, initialization).

    NOTE(review): this class looks machine-renamed and cannot run as-is:
    - both mixin base classes are the undefined name ``_UpperCAmelCase``
      (presumably the ``ModelTesterMixin`` / ``PipelineTesterMixin`` imported
      above — confirm);
    - every class attribute and every method shares one mangled name, so
      later definitions shadow earlier ones (only the last ``_snake_case``
      survives, and nothing is named ``setUp``/``test_*``);
    - several locals are read under pre-mangling names (``inputs_dict``,
      ``model``, ``x``, ``arg_names``) and ``SwiftFormerModelTester`` is not
      defined in this module;
    - the inner ``check_hidden_states_output`` repeats one parameter name
      three times, which is a SyntaxError.
    Confirm every item against the original file before relying on it.
    """

    # The original class-level annotations used typing names (Any/Dict/Tuple/
    # Union) that are not imported in this module and would raise NameError at
    # class creation; they are dropped here (values unchanged).
    _UpperCAmelCase = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    _UpperCAmelCase = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    # presumably setUp: builds the shared model tester and config tester,
    # but the results are bound to a throwaway local, not self — verify.
    def _snake_case ( self ):
        lowercase__: Dict = SwiftFormerModelTester(self )
        lowercase__: int = ConfigTester(
            self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )

    # presumably test_config — reads self.config_tester, never assigned above.
    def _snake_case ( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
    def _snake_case ( self ):
        pass

    # presumably test_model_common_attributes
    def _snake_case ( self ):
        lowercase__, lowercase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__: Optional[Any] = model_class(_UpperCAmelCase )
            lowercase__: List[str] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )

    # presumably test_forward_signature
    def _snake_case ( self ):
        lowercase__, lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__: Optional[Any] = model_class(_UpperCAmelCase )
            lowercase__: List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__: Optional[int] = [*signature.parameters.keys()]
            lowercase__: Optional[Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _UpperCAmelCase )

    # presumably test_model
    def _snake_case ( self ):
        lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    # presumably test_for_image_classification
    def _snake_case ( self ):
        lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )

    @slow
    def _snake_case ( self ):
        # presumably test_model_from_pretrained
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__: List[str] = SwiftFormerModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )

    @unittest.skip(reason='''SwiftFormer does not output attentions''' )
    def _snake_case ( self ):
        pass

    # presumably test_hidden_states_output
    def _snake_case ( self ):
        # NOTE(review): the three identical parameter names below are a
        # SyntaxError in the original text.
        def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
            lowercase__: List[str] = model_class(_UpperCAmelCase )
            model.to(_UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                lowercase__: List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            lowercase__: List[Any] = outputs.hidden_states
            lowercase__: str = 8
            self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(_UpperCAmelCase ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )
        lowercase__, lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__: Dict = True
            check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase__: Optional[Any] = True
            check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

    # presumably test_initialization
    def _snake_case ( self ):
        def _config_zero_init(_UpperCAmelCase ):
            lowercase__: str = copy.deepcopy(_UpperCAmelCase )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(_UpperCAmelCase , _UpperCAmelCase , 1e-1_0 )
                if isinstance(getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ):
                    lowercase__: Optional[Any] = _config_zero_init(getattr(_UpperCAmelCase , _UpperCAmelCase ) )
                    setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
            return configs_no_init
        lowercase__, lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__: Union[str, Any] = _config_zero_init(_UpperCAmelCase )
        for model_class in self.all_model_classes:
            lowercase__: Optional[int] = model_class(config=_UpperCAmelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def _snake_case ( self ):
        pass
def prepare_img():
    """Load the COCO fixture image used by the integration test below.

    Renamed from a mangled identifier: the integration test calls
    ``prepare_img()``.  The original ``-> int`` annotation was wrong — this
    returns a PIL image, not an int — so it is dropped.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class UpperCAmelCase (unittest.TestCase ):
    """Slow integration test: run the pretrained swiftformer-xs classifier on a
    fixture image and compare logits against reference values.

    The original was machine-mangled: every local was bound to one throwaway
    name while the code read ``outputs``/``torch_device`` etc., and the method
    names were lost.  ``default_image_processor`` is restored from its call
    site below; the test method follows the transformers naming convention.
    """

    @cached_property
    def default_image_processor(self):
        # Only build the processor when vision deps are installed.
        return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 2 | """simple docstring"""
import unittest
from transformers import DonutProcessor
__A = "naver-clova-ix/donut-base"
class UpperCAmelCase (unittest.TestCase ):
    """Tests DonutProcessor's tag-sequence -> JSON conversion.

    The original was machine-mangled: the processor was bound to a throwaway
    local instead of ``self.processor``, the method names were lost (the first
    must be ``setUp`` so unittest runs it before each test), and the method
    name ``tokenajson`` had its digit mangled — ``DonutProcessor`` exposes
    ``token2json``.
    """

    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(__A)

    def test_token2json(self):
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 2 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase (matrix):
    """Return the minimal path sum from top-left to bottom-right of *matrix*,
    moving only right or down.

    The matrix is updated in place with cumulative costs, so pass a copy if
    the original values matter.  (The original parameter had been machine-
    renamed while the body still read ``matrix`` — restored here.)
    """
    # first row: each cell is only reachable from the left
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # first column: each cell is only reachable from above
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # interior cells: cheapest of arriving from above or from the left
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest
    doctest.testmod()
| 211 |
'''simple docstring'''
import importlib
import os
import sys
# Required so module imports resolve when the python process runs from the repo root.
sys.path.append(".")
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : Optional[int] = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"""`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """
f"{test_file} instead." )
snake_case__ : Dict = components[-1]
if not test_fn.endswith("""py""" ):
raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead." )
if not test_fn.startswith("""test_modeling_""" ):
raise ValueError(
f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
snake_case__ : int = components[:-1] + [test_fn.replace(""".py""" , """""" )]
snake_case__ : int = """.""".join(_lowerCAmelCase )
return test_module_path
def __snake_case( _lowerCAmelCase ):
    """Import and return the test module for a model test file path.

    NOTE(review): machine-mangled — `get_module_path` and `test_module` are
    undefined in this module (every helper was renamed to `__snake_case`, so
    later defs shadow earlier ones), and the import is handed the raw path
    instead of the computed module path.  Verify against the original source.
    (The unimported `List[str]` return annotation was dropped.)
    """
    snake_case__ : str = get_module_path(_lowerCAmelCase )
    snake_case__ : Union[str, Any] = importlib.import_module(_lowerCAmelCase )
    return test_module
def __snake_case( _lowerCAmelCase ) -> list:
    """Collect `*ModelTester` classes from a test module, sorted by class name.

    NOTE(review): machine-mangled — `get_test_module` and `tester_classes`
    are read under pre-mangling names, and the sort-key lambda reads `x`
    rather than its own parameter.  Verify against the original source.
    (Return annotation corrected from the misleading `int` — this returns a
    sorted list.)
    """
    snake_case__ : List[Any] = []
    snake_case__ : Optional[int] = get_test_module(_lowerCAmelCase )
    for attr in dir(_lowerCAmelCase ):
        if attr.endswith("""ModelTester""" ):
            tester_classes.append(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
    # sort with class names
    return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def __snake_case( _lowerCAmelCase ) -> list:
    """Collect test classes (those with a non-empty `all_model_classes`) from a
    test module, sorted by class name.

    NOTE(review): machine-mangled — `get_test_module` and `test_classes` are
    read under pre-mangling names, and the sort-key lambda reads `x`.  Verify
    against the original source.  (Return annotation corrected from the
    unimported `Dict` — this returns a sorted list.)
    """
    snake_case__ : List[str] = []
    snake_case__ : Any = get_test_module(_lowerCAmelCase )
    for attr in dir(_lowerCAmelCase ):
        snake_case__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        snake_case__ : List[str] = getattr(_lowerCAmelCase , """all_model_classes""" , [] )
        if len(_lowerCAmelCase ) > 0:
            test_classes.append(_lowerCAmelCase )
    # sort with class names
    return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def __snake_case( _lowerCAmelCase ) -> list:
    """Collect every model class exercised by any test class in a module,
    sorted by class name.

    NOTE(review): machine-mangled — `get_test_classes` and `model_classes`
    are read under pre-mangling names, and the sort-key lambda reads `x`.
    Verify against the original source.  (Return annotation corrected from
    the unimported `Dict`.)
    """
    snake_case__ : Any = get_test_classes(_lowerCAmelCase )
    snake_case__ : Optional[Any] = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def __snake_case( _lowerCAmelCase ) -> Optional[Any]:
snake_case__ : Optional[int] = test_class()
if hasattr(_lowerCAmelCase , """setUp""" ):
test.setUp()
snake_case__ : Any = None
if hasattr(_lowerCAmelCase , """model_tester""" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
snake_case__ : Tuple = test.model_tester.__class__
return model_tester
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> list:
    """Return the test classes of a module that exercise a given model class,
    sorted by class name.

    NOTE(review): machine-mangled — `get_test_classes`, `test_classes`,
    `model_class` and `target_test_classes` are read under pre-mangling names,
    and the sort-key lambda reads `x`.  Verify against the original source.
    (Return annotation corrected from the unimported `Dict`.)
    """
    snake_case__ : Union[str, Any] = get_test_classes(_lowerCAmelCase )
    snake_case__ : str = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(_lowerCAmelCase )
    # sort with class names
    return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> list:
    """Return the tester classes backing the test classes of a given model,
    sorted by class name.

    NOTE(review): machine-mangled — `get_test_classes_for_model`,
    `get_model_tester_from_test_class`, `test_classes` and `tester_classes`
    are read under pre-mangling names, and the sort-key lambda reads `x`.
    Verify against the original source.  (Return annotation corrected from
    the unimported `Tuple`.)
    """
    snake_case__ : Optional[Any] = get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase )
    snake_case__ : Union[str, Any] = []
    for test_class in test_classes:
        snake_case__ : Tuple = get_model_tester_from_test_class(_lowerCAmelCase )
        if tester_class is not None:
            tester_classes.append(_lowerCAmelCase )
    # sort with class names
    return sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : x.__name__ )
def __snake_case( _lowerCAmelCase ) -> dict:
    """Return a mapping from each test class in a module to its model tester.

    NOTE(review): machine-mangled — `get_test_classes` and
    `get_model_tester_from_test_class` are read under pre-mangling names
    (every helper here was renamed to `__snake_case`).  Verify against the
    original source.  (Return annotation corrected from the unimported
    `Union[str, Any]` — this returns a dict.)
    """
    snake_case__ : Optional[Any] = get_test_classes(_lowerCAmelCase )
    snake_case__ : Union[str, Any] = {test_class: get_model_tester_from_test_class(_lowerCAmelCase ) for test_class in test_classes}
    return test_tester_mapping
def __snake_case( _lowerCAmelCase ) -> dict:
    """Return a mapping from each model class to the test classes covering it.

    NOTE(review): machine-mangled — `get_model_classes`,
    `get_test_classes_for_model` and `model_test_mapping` are read under
    pre-mangling names.  Verify against the original source.  (Return
    annotation corrected from the misleading `int` — this returns a dict.)
    """
    snake_case__ : Any = get_model_classes(_lowerCAmelCase )
    snake_case__ : Any = {
        model_class: get_test_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes
    }
    return model_test_mapping
def __snake_case( _lowerCAmelCase ) -> dict:
    """Return a mapping from each model class to its tester classes.

    NOTE(review): machine-mangled — `get_model_classes`,
    `get_tester_classes_for_model` and `model_to_tester_mapping` are read
    under pre-mangling names.  Verify against the original source.  (Return
    annotation corrected from the unimported `Optional[int]`.)
    """
    snake_case__ : Union[str, Any] = get_model_classes(_lowerCAmelCase )
    snake_case__ : str = {
        model_class: get_tester_classes_for_model(_lowerCAmelCase , _lowerCAmelCase ) for model_class in model_classes
    }
    return model_to_tester_mapping
def __snake_case( _lowerCAmelCase ) -> int:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return o
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return o.__name__
elif isinstance(_lowerCAmelCase , (list, tuple) ):
return [to_json(_lowerCAmelCase ) for x in o]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return {to_json(_lowerCAmelCase ): to_json(_lowerCAmelCase ) for k, v in o.items()}
else:
return o
| 35 | 0 |
'''simple docstring'''
def a__ ( a__ ):
    """Return True when the decimal representation of *a__* is a palindrome."""
    digits = str(a__)
    return digits == digits[::-1]
def a__ ( a__ ):
    """Return *a__* plus the number formed by reversing its decimal digits."""
    reversed_digits = str(a__)[::-1]
    return int(a__) + int(reversed_digits)
def a__ ( a__ = 1_00_00 ):
    """Count numbers below *a__* that do not reach a palindrome within 50
    reverse-and-add iterations (candidate Lychrel numbers, Project Euler 55).

    NOTE(review): machine-mangled — `iterations`, `lychrel_nums`,
    `sum_reverse` and `is_palindrome` are read under pre-mangling names (the
    two helpers above were both renamed to `a__`, so only the last survives);
    restore distinct names before running.
    """
    __SCREAMING_SNAKE_CASE = []
    for num in range(1 , a__ ):
        __SCREAMING_SNAKE_CASE = 0
        __SCREAMING_SNAKE_CASE = num
        while iterations < 50:
            __SCREAMING_SNAKE_CASE = sum_reverse(a__ )
            iterations += 1
            if is_palindrome(a__ ):
                break
        # `while ... else`: runs only when the loop exhausts without `break`,
        # i.e. no palindrome appeared within 50 iterations.
        else:
            lychrel_nums.append(a__ )
    return len(a__ )
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined here — the solver above is named `a__`.
    print(f"""{solution() = }""")
| 368 |
'''simple docstring'''
def a__ ( a__ ):
    """Sort *a__* with pancake sort and return the resulting list.

    The input list itself is not mutated (each step rebuilds via slicing);
    a list of length <= 1 is returned unchanged.  (The original body read
    ``cur``/``arr`` while its locals had been machine-renamed — restored.)
    """
    arr = a__
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Flip the prefix ending at mi so the maximum comes to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Flip the whole unsorted prefix so the maximum sinks to position cur-1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    # NOTE(review): machine-mangled — `user_input`, `unsorted` and
    # `pancake_sort` are read under pre-mangling names (both inputs are bound
    # to `UpperCAmelCase` and the sorter above is named `a__`); restore before
    # running.  The unimported/incorrect `Tuple`/`str` annotations are dropped.
    UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
    UpperCAmelCase = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
| 331 | 0 |
"""simple docstring"""
def _snake_case ( UpperCamelCase : str ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def _snake_case ( UpperCamelCase : Dict ):
UpperCAmelCase : List[str] = 0
UpperCAmelCase : Any = len(snake_case__ ) # No of vertices in graph
UpperCAmelCase : Tuple = [0] * n
UpperCAmelCase : str = [False] * n
def dfs(UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : str ):
UpperCAmelCase : str = True
UpperCAmelCase : str = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(snake_case__ , snake_case__ , snake_case__ , id_ )
UpperCAmelCase : Tuple = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
UpperCAmelCase : Optional[Any] = min(low[at] , low[to] )
UpperCAmelCase : int = []
for i in range(snake_case__ ):
if not visited[i]:
dfs(snake_case__ , -1 , snake_case__ , id_ )
return bridges
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest
    doctest.testmod()
| 109 |
def __lowerCamelCase ( snake_case__ ) -> list:
"""simple docstring"""
def merge(snake_case__ ,snake_case__ ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(snake_case__ ) <= 1:
return collection
_SCREAMING_SNAKE_CASE = len(snake_case__ ) // 2
return merge(merge_sort(collection[:mid] ) ,merge_sort(collection[mid:] ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): machine-mangled — `user_input`, `unsorted` and `merge_sort`
    # are read under pre-mangling names (both inputs are bound to
    # `UpperCamelCase` and the sorter above is named `__lowerCamelCase`);
    # restore before running.
    UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
    UpperCamelCase = [int(item) for item in user_input.split(''',''')]
    print(*merge_sort(unsorted), sep=''',''')
| 306 | 0 |
'''simple docstring'''
def _A ( _lowerCAmelCase = 3 , _lowerCAmelCase = 7 , _lowerCAmelCase = 1_000_000 ):
"""simple docstring"""
__lowercase =0
__lowercase =1
for current_denominator in range(1 , limit + 1 ):
__lowercase =current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__lowercase =current_numerator
__lowercase =current_denominator
return max_numerator
if __name__ == "__main__":
    # The original called an undefined `solution`; the solver defined above
    # in this file is named `_A`.
    print(_A(numerator=3, denominator=7, limit=100_0000))
| 366 |
'''simple docstring'''
from __future__ import annotations
import requests
def _A ( _lowerCAmelCase ):
    """Fetch one Hacker News item (by id *_lowerCAmelCase*) from the Firebase
    API and return the parsed JSON.

    (The original interpolated an undefined ``story_id`` into the URL and then
    passed the raw argument — not the URL — to ``requests.get``; restored.)
    """
    url = f"https://hacker-news.firebaseio.com/v0/item/{_lowerCAmelCase}.json?print=pretty"
    return requests.get(url).json()
def _A ( _lowerCAmelCase = 10 ):
    """Return the top *_lowerCAmelCase* Hacker News stories as parsed JSON.

    NOTE(review): machine-mangled — `requests.get` is handed the story count
    instead of the URL built on the previous line; `max_stories`, `story_ids`,
    `story_id` and `get_hackernews_story` are undefined here (all three
    helpers in this file were renamed to `_A`, so later definitions shadow the
    earlier ones).  Restore distinct names before use.
    """
    __lowercase ='https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
    __lowercase =requests.get(_lowerCAmelCase ).json()[:max_stories]
    return [get_hackernews_story(_lowerCAmelCase ) for story_id in story_ids]
def _A ( _lowerCAmelCase = 10 ):
    """Render the top *_lowerCAmelCase* Hacker News stories as a markdown list.

    NOTE(review): machine-mangled — `hackernews_top_stories` and `stories`
    are undefined here (the fetcher above was renamed to `_A`), and the
    format call unpacks the function argument rather than each `story`.
    Verify against the original before use.
    """
    __lowercase =hackernews_top_stories(_lowerCAmelCase )
    return "\n".join('* [{title}]({url})'.format(**_lowerCAmelCase ) for story in stories )
if __name__ == "__main__":
    # NOTE(review): `hackernews_top_stories_as_markdown` is undefined in this
    # module — the markdown renderer above was machine-renamed to `_A`.
    print(hackernews_top_stories_as_markdown())
| 48 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_( BaseOutput ):
    """Output dataclass wrapping a single ``torch.FloatTensor``.

    The original base class was the undefined name ``SCREAMING_SNAKE_CASE_``;
    ``BaseOutput`` is imported above and otherwise unused, so it is the only
    consistent reading.  NOTE(review): the field name looks machine-renamed —
    diffusers output types conventionally call this tensor ``sample``; confirm.
    """
    __lowercase : torch.FloatTensor
class lowerCAmelCase_( nn.Module ):
    """Convolutional VAE encoder: conv_in -> down blocks -> mid block -> norm/act -> conv_out.

    Restored from machine-mangled text: ``__init__`` repeated one parameter
    name (a SyntaxError), every ``self.X`` assignment bound a throwaway local
    (so the forward pass found no attributes), and the conv layer was spelled
    ``torch.nn.Convad`` — no such attribute exists in ``torch.nn``; ``Conv2d``
    is the only consistent reading.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)
        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        # double_z: emit both mean and (log-)variance channels
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    # renamed from a mangled identifier so nn.Module.__call__ dispatches here
    def forward(self, x):
        """Encode image tensor *x* into the latent representation."""
        sample = x
        sample = self.conv_in(sample)
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)
        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class lowerCAmelCase_( nn.Module ):
    """Convolutional VAE decoder: conv_in -> mid block -> up blocks -> norm/act -> conv_out.

    Restored from machine-mangled text: ``__init__`` repeated one parameter
    name (a SyntaxError), every ``self.X`` assignment bound a throwaway local,
    and the conv layer was spelled ``nn.Convad`` — no such attribute exists in
    ``torch.nn``; ``Conv2d`` is the only consistent reading.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # spatial norm conditions on the latent embedding channels
        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
        self.gradient_checkpointing = False

    # renamed from a mangled identifier so nn.Module.__call__ dispatches here
    def forward(self, z, latent_embeds=None):
        """Decode latent tensor *z* (optionally conditioned on *latent_embeds*)."""
        sample = z
        sample = self.conv_in(sample)
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)
        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class lowerCAmelCase_(nn.Module):
    """Vector-quantization layer: snaps each spatial latent vector to its
    nearest codebook embedding (straight-through estimator for gradients).

    Args:
        n_e: number of codebook entries.
        vq_embed_dim: dimensionality of each codebook vector.
        beta: commitment-loss weight.
        remap: optional ``.npy`` path with a subset of used indices; when set,
            raw indices are remapped into that subset.
        unknown_index: what to assign to indices outside the remap table —
            "random", "extra", or a fixed integer.
        sane_index_shape: if True, return indices shaped (batch, h, w).
        legacy: switches which loss term is scaled by ``beta``.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                # Reserve one extra slot for out-of-table indices.
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        """Map raw codebook indices into the remapped (used) index space."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1  # True where the index is not in `used`
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        """Inverse of :meth:`remap_to_used`: recover raw codebook indices."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding (beta weights commitment vs codebook term)
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        """Look up codebook vectors for ``indices``; ``shape`` is
        (batch, height, width, channel) or None to skip reshaping."""
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class lowerCAmelCase_(SCREAMING_SNAKE_CASE_):
    """Diagonal Gaussian over latents, parameterized by a tensor whose channel
    dimension concatenates mean and log-variance halves.

    Args:
        parameters: tensor split along dim=1 into (mean, logvar).
        deterministic: if True, variance is forced to zero so sampling
            returns the mean.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # Clamp logvar for numerical stability of exp() below.
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # Zero variance: sample() and mode() then coincide with the mean.
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        """Draw a reparameterized sample on the same device/dtype as the parameters."""
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        """KL divergence to ``other`` (another diagonal Gaussian) or, when
        ``other`` is None, to the standard normal. Summed over dims 1..3."""
        if self.deterministic:
            return torch.Tensor([0.0])
        if other is None:
            return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var
            - 1.0
            - self.logvar
            + other.logvar,
            dim=[1, 2, 3],
        )

    def nll(self, sample, dims=(1, 2, 3)):
        """Negative log-likelihood of ``sample`` under this Gaussian, summed
        over ``dims`` (tuple default avoids a shared mutable default)."""
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        """The distribution mode, which for a Gaussian is its mean."""
        return self.mean
| 37 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Remove segments from a dot-separated path.

    A positive ``n_shave_prefix_segments`` drops that many leading segments;
    a negative value drops segments from the end instead.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Build old->new key mappings for resnet weights (LDM -> diffusers naming).

    Returns a list of ``{"old": ..., "new": ...}`` dicts; each new key has the
    LDM layer names rewritten and ``n_shave_prefix_segments`` prefix segments
    removed.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Build old->new key mappings for attention weights (LDM -> diffusers naming).

    Returns a list of ``{"old": ..., "new": ...}`` dicts with group-norm and
    output-projection names rewritten.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    """Copy weights from ``old_checkpoint`` into ``checkpoint`` following
    ``paths`` (list of ``{"old": ..., "new": ...}`` mappings).

    ``attention_paths_to_split`` maps fused qkv keys to per-projection target
    keys; those tensors are split into query/key/value before assignment.
    ``additional_replacements`` applies extra old->new substring renames.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Convert an LDM UNet state dict to the diffusers UNet2D key layout.

    Args:
        checkpoint: the original LDM state dict.
        config: model config dict; ``num_res_blocks`` and
            ``num_head_channels`` drive the block/attention layout.

    Returns:
        A new state dict with diffusers-style keys.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        # A `.op` sub-block is a downsampler, not a resnet.
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    # middle block: resnet, attention, resnet
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            # Single-layer output block: keys map directly onto resnet keys.
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    # The "ldm" section is not a UNet constructor argument.
    if "ldm" in config:
        del config["ldm"]

    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)

    # Save a full LDM pipeline when scheduler/VAE live next to the checkpoint;
    # otherwise fall back to saving just the UNet.
    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 37 | 1 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
# Load the reference translation pairs used by the BLEU integration tests below.
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class UpperCamelCase_(unittest.TestCase):
    """Integration tests checking FSMT translation quality (BLEU) against
    minimum scores for each WMT19 language pair."""

    def get_tokenizer(self, mname):
        """Load the tokenizer for model ``mname``."""
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        """Load the model on the test device; use half precision on CUDA."""
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 356 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# Fork point with `main`: only files touched on this branch are reported.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
# Restrict to .py files under the top-level dirs passed as CLI args.
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is fed directly into Makefile commands.
print(" ".join(relevant_modified_files), end="")
| 248 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCAmelCase__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for MVP (slow and fast tokenizers)."""

    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        # Minimal BPE vocab/merges written to a temp dir for local tokenizers.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        # Intentionally skipped for MVP.
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 11 |
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
# Module-level logger for this builder.
# NOTE(review): annotated `int`, but `get_logger` presumably returns a Logger —
# the annotation looks wrong; confirm and fix upstream.
_UpperCAmelCase : int = datasets.utils.logging.get_logger(__name__)
# Import pyspark only for static type checking, avoiding a hard runtime dependency.
if TYPE_CHECKING:
    import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets built from a Spark DataFrame."""

    # Optional explicit feature schema; inferred from the DataFrame when None.
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    """Return a generator factory yielding (key, row-dict) examples from ``df``,
    visiting partitions in ``partition_order``.

    Keys are ``"{partition_id}_{row_id}"`` so they are unique and reproducible
    for a fixed partition order.
    """
    import pyspark

    def generate_fn():
        # Tag every row with its partition id so we can pull partitions one by one.
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    """Examples iterable backed by a Spark DataFrame; sharding and shuffling
    operate at partition granularity."""

    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator):
        """Return a copy iterating partitions in a shuffled order."""
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers):
        """Return a copy restricted to the partitions assigned to this worker."""
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self):
        # One shard per Spark partition in the current order.
        return len(self.partition_order)
class a__ ( datasets.DatasetBuilder ):
    """Dataset builder that materializes a PySpark DataFrame."""

    # The datasets framework reads this attribute to pick the config class.
    BUILDER_CONFIG_CLASS = SparkConfig
def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
    """Create a builder over ``df``.

    Args:
        df: the source PySpark DataFrame.
        cache_dir: datasets cache directory (must be NFS-shared on clusters).
        working_dir: scratch directory for intermediate shard files.
        **config_kwargs: forwarded to the builder config.
    """
    import pyspark

    self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
    self.df = df
    self._working_dir = working_dir
    # semanticHash() makes the config name deterministic for an identical query plan.
    super().__init__(
        cache_dir=cache_dir,
        config_name=str(self.df.semanticHash()),
        **config_kwargs,
    )
def _validate_cache_dir(self):
    """Verify every Spark worker can write to the cache dir; raise otherwise."""

    # Returns the path of the created file.
    def create_cache_and_write_probe(context):
        # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
        # already exist.
        os.makedirs(self._cache_dir, exist_ok=True)
        probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
        # Opening the file in append mode will create a new file unless it already exists, in which case it will not
        # change the file contents.
        open(probe_file, "a")
        return [probe_file]

    if self._spark.conf.get("spark.master", "").startswith("local"):
        return

    # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
    # accessible to the driver.
    # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
    if self._cache_dir:
        probe = (
            self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
        )
        if os.path.isfile(probe[0]):
            return
    raise ValueError(
        "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
    )
def _snake_case (self ):
return datasets.DatasetInfo(features=self.config.features )
def _snake_case (self , __lowercase ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _snake_case (self , __lowercase ):
import pyspark
def get_arrow_batch_size(__lowercase ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
__lowerCAmelCase = self.df.count()
__lowerCAmelCase = df_num_rows if df_num_rows <= 1_00 else 1_00
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
__lowerCAmelCase = (
self.df.limit(__lowercase )
.repartition(1 )
.mapInArrow(__lowercase , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
__lowerCAmelCase = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
__lowerCAmelCase = min(__lowercase , int(approx_total_size / max_shard_size ) )
__lowerCAmelCase = self.df.repartition(__lowercase )
def _snake_case (self , __lowercase , __lowercase , __lowercase , ):
import pyspark
__lowerCAmelCase = ParquetWriter if file_format == '''parquet''' else ArrowWriter
__lowerCAmelCase = os.path.join(self._working_dir , os.path.basename(__lowercase ) ) if self._working_dir else fpath
__lowerCAmelCase = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
__lowerCAmelCase = self.config.features
__lowerCAmelCase = self._writer_batch_size
__lowerCAmelCase = self._fs.storage_options
def write_arrow(__lowercase ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
__lowerCAmelCase = pyspark.TaskContext().taskAttemptId()
__lowerCAmelCase = next(__lowercase , __lowercase )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
__lowerCAmelCase = 0
__lowerCAmelCase = writer_class(
features=__lowercase , path=working_fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , writer_batch_size=__lowercase , storage_options=__lowercase , embed_local_files=__lowercase , )
__lowerCAmelCase = pa.Table.from_batches([first_batch] )
writer.write_table(__lowercase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
__lowerCAmelCase = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , writer_batch_size=__lowercase , storage_options=__lowercase , embed_local_files=__lowercase , )
__lowerCAmelCase = pa.Table.from_batches([batch] )
writer.write_table(__lowercase )
if writer._num_bytes > 0:
__lowerCAmelCase , __lowerCAmelCase = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__lowercase ) ):
__lowerCAmelCase = os.path.join(os.path.dirname(__lowercase ) , os.path.basename(__lowercase ) )
shutil.move(__lowercase , __lowercase )
__lowerCAmelCase = (
self.df.mapInArrow(__lowercase , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _snake_case (self , __lowercase , __lowercase = "arrow" , __lowercase = None , __lowercase = None , **__lowercase , ):
self._validate_cache_dir()
__lowerCAmelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__lowercase )
__lowerCAmelCase = not is_remote_filesystem(self._fs )
__lowerCAmelCase = os.path.join if is_local else posixpath.join
__lowerCAmelCase = '''-TTTTT-SSSSS-of-NNNNN'''
__lowerCAmelCase = F"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
__lowerCAmelCase = path_join(self._output_dir , __lowercase )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = []
__lowerCAmelCase = []
for task_id, content in self._prepare_split_single(__lowercase , __lowercase , __lowercase ):
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__lowercase )
__lowerCAmelCase = total_num_examples
__lowerCAmelCase = total_num_bytes
# should rename everything at the end
logger.debug(F"""Renaming {total_shards} shards.""" )
if total_shards > 1:
__lowerCAmelCase = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
__lowerCAmelCase = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowercase , __lowercase , __lowercase , ):
rename(
__lowercase , fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , fpath.replace('''TTTTT-SSSSS''' , F"""{global_shard_id:05d}""" ).replace('''NNNNN''' , F"""{total_shards:05d}""" ) , )
__lowerCAmelCase = []
__lowerCAmelCase = 0
for i in range(len(__lowercase ) ):
__lowerCAmelCase , __lowerCAmelCase = task_id_and_num_shards[i]
for shard_id in range(__lowercase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__lowercase , len(__lowercase ) ).map(lambda __lowercase : _rename_shard(*__lowercase ) ).collect()
else:
# don't use any pattern
__lowerCAmelCase = 0
__lowerCAmelCase = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , F"""{shard_id:05d}""" ).replace('''TTTTT''' , F"""{task_id:05d}""" ) , fpath.replace(__lowercase , '''''' ) , )
def _snake_case (self , __lowercase , ):
return SparkExamplesIterable(self.df )
| 174 | 0 |
# Lazy-import bootstrap for the data2vec model family.
# NOTE(review): in the mangled source the three export lists all rebound one
# name (clobbering each other) and `_import_structure` was never defined before
# being passed to _LazyModule; the lazy module was also never installed in
# sys.modules. Reconstructed below.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Maps submodule name -> public names it defines; consumed by _LazyModule.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both constants were bound to the same mangled name, so the
# logger binding was immediately clobbered by the archive map; distinct names
# restored.
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    """Configuration class storing the hyperparameters of a YOLOS model.

    NOTE(review): the original ``__init__`` signature had every parameter bound
    to one mangled name (a SyntaxError) and every assignment discarded into a
    throwaway local; parameter names were reconstructed from the right-hand
    sides of the original body.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],  # mutable default kept for backward compatibility
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS.

    NOTE(review): in the mangled source all three properties shared one name,
    so only the last survived; upstream names restored.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 330 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
# Map human-readable filter names to the Pillow resampling constants.
# Pillow 9.1 moved the filters into the ``Image.Resampling`` enum, so pick the
# right namespace based on the installed version.
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    _lowerCAmelCase = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    # Older Pillow exposes the filters as module-level constants.
    _lowerCAmelCase = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def UpperCamelCase_(images):
    """Convert a batch of torch tensors in [-1, 1] to a list of PIL images.

    NOTE(review): the mangled source discarded each intermediate result and the
    second function below shadowed this one's name; both fixed.
    """
    images = (images / 2 + 0.5).clamp(0, 1)
    # (N, C, H, W) float tensor -> (N, H, W, C) numpy array on CPU
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    return numpy_to_pil(images)


def numpy_to_pil(images):
    """Convert a numpy image batch (N, H, W, C) with values in [0, 1] to PIL images."""
    if images.ndim == 3:
        # A single image was passed; add the batch dimension.
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 218 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time.
# The worker function below refers to it as ``process_lock``; keep both names bound.
_lowerCAmelCase = Lock()
process_lock = _lowerCAmelCase
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one element of the odd-even transposition sort.

    NOTE(review): the mangled signature repeated one parameter name (a
    SyntaxError) and the min/max results were discarded; names restored and
    the value update fixed.
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):  # NOTE(review): hard-coded for the 10-element demo list
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort ``arr`` in place using one process per element (odd-even transposition).

    Returns the sorted list. NOTE(review): pipe bindings and the worker target
    were mangled in the original; reconstructed.
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """Demo entry point: sort a reversed 10-element list and print before/after.

    NOTE(review): the mangled source named this function differently while the
    __main__ guard called ``main()``; name restored.
    """
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 218 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
# Module-level logger (mangled name; nothing else in this excerpt references it).
__snake_case = logging.get_logger(__name__)
def shape_list(tensor) -> List[int]:
    """Return the shape of ``tensor``, preferring static dimensions.

    Dynamic (unknown) dimensions come back as scalar TF tensors. NOTE(review):
    upstream name restored — the mangled name collided with eight sibling
    functions and this function is referenced as ``shape_list`` below.
    """
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        # Completely unknown rank: only the dynamic shape is available.
        return dynamic

    static = tensor.shape.as_list()
    # Fall back to the dynamic value only where the static dim is unknown.
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits, axis=None, name=None):
    """Numerically safer wrapper around ``tf.nn.softmax``.

    Adds a tiny epsilon to the logits; per the upstream comment this works
    around an XLA compilation issue with plain softmax — the shift does not
    change the result materially.
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Apply layer normalization over ``axis`` with 1-D ``weight``/``bias``.

    Raises NotImplementedError for non-1D scale/offset or a multi-axis spec.
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1
        # dimensions on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    """Flatten dimensions ``start_dim``..``end_dim`` of ``input`` (torch.flatten semantics)."""
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        # Nothing to flatten.
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask):
    """Broadcast a 2D/3D attention mask to 4D and invert it into additive form.

    Positions to attend to become 0; masked positions become the dtype's most
    negative value.
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor, embed_dim, tensor_name="input_ids") -> None:
    """Assert that all ids in ``tensor`` are smaller than ``embed_dim``.

    Raises a TF debugging assertion with a helpful message pointing at likely
    tokenization problems.
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Save ``data`` as attribute ``name`` on ``group``, chunking if too large.

    NOTE(review): in the mangled source the final ``group.attrs[...]`` writes
    were discarded into locals, so nothing was ever stored; restored.
    """
    # HDF5 object headers are limited to 64 KB; keep a safety margin.
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """Load attribute ``name`` from ``group``, reassembling chunked attributes.

    Byte strings are decoded to UTF-8; returns a list of values (empty if the
    attribute is absent).
    """
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        # Attribute may have been split into "<name>0", "<name>1", ... chunks.
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expand every rank-1 TF tensor in a (possibly nested) structure to rank 2."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 367 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original mLUKE checkpoint to the HF format and sanity-check it.

    NOTE(review): the mangled signature repeated one parameter name (a
    SyntaxError) and every state-dict/config write was discarded into a local;
    reconstructed. Keyword argument values that were mangled away (e.g.
    ``use_entity_aware_attention``, ``strict``) are restored to the values the
    surrounding checks require — confirm against the original script.
    """
    # Load configuration defined in the metadata file.
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Point the saved tokenizer config at the MLuke tokenizer class.
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # (w2e/e2w/e2e queries start from the word-to-word query weights).
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    # Tied/decoder weights are re-created by tie_weights(), so drop them here.
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def __lowerCAmelCase(entity_vocab_path):
    """Load the original mLUKE entity vocabulary from a JSON-lines file.

    Each line is a JSON object with an integer ``"id"`` and an ``"entities"``
    list of ``[entity_name, language]`` pairs.

    Args:
        entity_vocab_path: path to the JSON-lines entity vocabulary file.

    Returns:
        dict mapping ``"<language>:<entity_name>"`` (or the bare special token
        such as ``"[MASK]"``) to the entity id.
    """
    special_tokens = ["[MASK]", "[PAD]", "[UNK]"]
    # One JSON object per line; close the file deterministically.
    with open(entity_vocab_path, encoding="utf-8") as f:
        data = [json.loads(line) for line in f]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in special_tokens:
                # Special tokens are language-independent; keep one entry only.
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


# Readable re-export of the loader.
load_original_entity_vocab = __lowerCAmelCase
if __name__ == "__main__":
    # Build the CLI for the mLUKE checkpoint conversion. The original code
    # assigned the parser and parsed args to a throwaway name while reading
    # `parser`/`args`, which raised NameError immediately.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
    parser.add_argument(
        """--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
    )
    parser.add_argument(
        """--entity_vocab_path""",
        default=None,
        type=str,
        help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
    )
    parser.add_argument(
        """--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
    )
    args = parser.parse_args()
    # NOTE(review): `convert_luke_checkpoint` must be the conversion routine
    # defined earlier in this file — confirm its name survived refactoring.
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 112 | 0 |
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__SCREAMING_SNAKE_CASE : Dict = data_utils.TransfoXLTokenizer
__SCREAMING_SNAKE_CASE : List[str] = data_utils.TransfoXLCorpus
# Legacy python-2 corpus pickles reference their classes as "data_utils.Vocab"
# and "vocabulary.Vocab". Alias those attributes onto the modern tokenizer
# module and register it under both legacy module names so pickle.load can
# resolve them (this is why `sys` is imported above).
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def UpperCamelCase_(tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file):
    """Convert a Transformer-XL TF checkpoint and/or pre-processed corpus to PyTorch.

    Args:
        tf_checkpoint_path: optional path to a TensorFlow checkpoint to convert.
        transfo_xl_config_file: optional config JSON; empty string uses defaults.
        pytorch_dump_folder_path: output folder for the converted artifacts.
        transfo_xl_dataset_file: optional pickled corpus (original TF repo format).
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            # latin1 keeps python-2 pickled byte strings loadable.
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        # The vocabulary is saved separately above.
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


# Re-export under the name called by the __main__ guard below.
convert_transfo_xl_checkpoint_to_pytorch = UpperCamelCase_
if __name__ == "__main__":
    # Build the CLI and dispatch the conversion. The original code assigned
    # the parser and parsed args to a throwaway name while reading
    # `parser`/`args`, which raised NameError immediately.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
    )
    parser.add_argument(
        """--tf_checkpoint_path""",
        default="""""",
        type=str,
        help="""An optional path to a TensorFlow checkpoint path to be converted.""",
    )
    parser.add_argument(
        """--transfo_xl_config_file""",
        default="""""",
        type=str,
        help=(
            """An optional config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--transfo_xl_dataset_file""",
        default="""""",
        type=str,
        help="""An optional dataset file to be converted in a vocabulary.""",
    )
    args = parser.parse_args()
    # NOTE(review): relies on the conversion routine above being exported as
    # `convert_transfo_xl_checkpoint_to_pytorch`.
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 31 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a RemBERT TensorFlow checkpoint to a PyTorch state dict.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        rembert_config_file: config JSON describing the model architecture.
        pytorch_dump_path: output path for the PyTorch weights.
    """
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


# Re-export under the name called by the __main__ guard below.
convert_rembert_tf_checkpoint_to_pytorch = __magic_name__
if __name__ == "__main__":
    # Build the CLI and run the RemBERT conversion. The original code assigned
    # the parser and parsed args to a throwaway name while reading
    # `parser`/`args`, which raised NameError immediately.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--rembert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained RemBERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 202 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowercase__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase(ModelMixin, ConfigMixin):
    """Utility holding the learned text embeddings for classifier-free sampling.

    When ``learnable`` is True a ``(length, hidden_size)`` zero-initialised
    parameter is created; otherwise ``embeddings`` is an empty parameter and
    the pipeline falls back to encoding empty prompts.

    NOTE(review): bases restored to (ModelMixin, ConfigMixin) — the previous
    duplicated placeholder base raised TypeError at class creation.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            # One learned embedding per negative-prompt token position.
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)


# Re-export under the name the pipeline class annotations reference.
LearnedClassifierFreeSamplingEmbeddings = __lowerCAmelCase
class __lowerCAmelCase ( __magic_name__ ):
    """
    Pipeline for text-guided image generation with VQ-Diffusion.

    Components registered in ``__init__``: a VQ-VAE (``vqvae``) that decodes
    latent code indices to images, a CLIP text encoder and tokenizer for the
    prompt, a discrete ``transformer`` over latent pixel indices, a
    ``VQDiffusionScheduler``, and learned classifier-free-sampling embeddings.

    NOTE(review): this block appears machine-mangled — locals are repeatedly
    bound to `_UpperCamelCase` while later statements read the intended names
    (`text_inputs`, `prompt_embeds`, `latents`, ...), several `def` headers
    repeat the same parameter name, and `self._encode_prompt`/`self.truncate`
    are called although the methods here are named `snake_case__`. Restore the
    original identifiers before relying on this code.
    """
    _snake_case : VQModel
    _snake_case : CLIPTextModel
    _snake_case : CLIPTokenizer
    _snake_case : TransformeraDModel
    _snake_case : LearnedClassifierFreeSamplingEmbeddings
    _snake_case : VQDiffusionScheduler
    def __init__( self : Dict , lowerCAmelCase__ : VQModel , lowerCAmelCase__ : CLIPTextModel , lowerCAmelCase__ : CLIPTokenizer , lowerCAmelCase__ : TransformeraDModel , lowerCAmelCase__ : VQDiffusionScheduler , lowerCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings , ) -> Optional[Any]:
        '''Register all pipeline sub-modules on the DiffusionPipeline base.'''
        super().__init__()
        self.register_modules(
            vqvae=lowerCAmelCase__ , transformer=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , learned_classifier_free_sampling_embeddings=lowerCAmelCase__ , )
    def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any ) -> int:
        '''Encode the prompt(s) into CLIP embeddings, normalise them, duplicate
        them per requested image, and (for classifier-free guidance) prepend
        unconditional embeddings — learned ones if available, otherwise the
        encoding of empty prompts.'''
        _UpperCamelCase = len(lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else 1
        # get prompt text embeddings
        _UpperCamelCase = self.tokenizer(
            lowerCAmelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        _UpperCamelCase = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            # Warn about (and drop) the tokens beyond CLIP's max length.
            _UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            _UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
        _UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        _UpperCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowerCAmelCase__ )
        # duplicate text embeddings for each generation per prompt
        _UpperCamelCase = prompt_embeds.repeat_interleave(lowerCAmelCase__ , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                _UpperCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings
                _UpperCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(lowerCAmelCase__ , 1 , 1 )
            else:
                # Fall back to encoding empty prompts as the unconditional branch.
                _UpperCamelCase = [''''''] * batch_size
                _UpperCamelCase = text_input_ids.shape[-1]
                _UpperCamelCase = self.tokenizer(
                    lowerCAmelCase__ , padding='''max_length''' , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='''pt''' , )
                _UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                _UpperCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowerCAmelCase__ )
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                _UpperCamelCase = negative_prompt_embeds.shape[1]
                _UpperCamelCase = negative_prompt_embeds.repeat(1 , lowerCAmelCase__ , 1 )
                _UpperCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowerCAmelCase__ , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _UpperCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
    @torch.no_grad()
    def __call__( self : str , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 100 , lowerCAmelCase__ : float = 5.0 , lowerCAmelCase__ : float = 1.0 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
        '''Run the generation loop: validate inputs, start from fully-masked
        latents (or user-supplied ones), iterate the discrete diffusion
        scheduler with optional classifier-free guidance and log-prob
        truncation, then decode the final codes with the VQ-VAE.'''
        if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
            _UpperCamelCase = 1
        elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
            _UpperCamelCase = len(lowerCAmelCase__ )
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__ )}""" )
        _UpperCamelCase = batch_size * num_images_per_prompt
        _UpperCamelCase = guidance_scale > 1.0
        _UpperCamelCase = self._encode_prompt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(lowerCAmelCase__ )}.""" )
        # get the initial completely masked latents unless the user supplied it
        _UpperCamelCase = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The mask token is the last embedding index.
            _UpperCamelCase = self.transformer.num_vector_embeds - 1
            _UpperCamelCase = torch.full(lowerCAmelCase__ , lowerCAmelCase__ ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    f""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
            _UpperCamelCase = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(lowerCAmelCase__ , device=self.device )
        _UpperCamelCase = self.scheduler.timesteps.to(self.device )
        _UpperCamelCase = latents
        for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ):
            # expand the sample if we are doing classifier free guidance
            _UpperCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            _UpperCamelCase = self.transformer(lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , timestep=lowerCAmelCase__ ).sample
            if do_classifier_free_guidance:
                _UpperCamelCase , _UpperCamelCase = model_output.chunk(2 )
                _UpperCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                # Renormalise the guided log-probabilities.
                model_output -= torch.logsumexp(lowerCAmelCase__ , dim=1 , keepdim=lowerCAmelCase__ )
            _UpperCamelCase = self.truncate(lowerCAmelCase__ , lowerCAmelCase__ )
            # remove `log(0)`'s (`-inf`s)
            _UpperCamelCase = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            _UpperCamelCase = self.scheduler.step(lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
        # Decode the final latent codes with the VQ-VAE and convert to images.
        _UpperCamelCase = self.vqvae.config.vq_embed_dim
        _UpperCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        _UpperCamelCase = self.vqvae.quantize.get_codebook_entry(lowerCAmelCase__ , shape=lowerCAmelCase__ )
        _UpperCamelCase = self.vqvae.decode(lowerCAmelCase__ , force_not_quantize=lowerCAmelCase__ ).sample
        _UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
        _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            _UpperCamelCase = self.numpy_to_pil(lowerCAmelCase__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowerCAmelCase__ )
    def snake_case__ ( self : List[Any] , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : float ) -> torch.FloatTensor:
        '''Truncate the predicted log-probabilities: keep only the most likely
        classes whose cumulative probability stays below `truncation_rate`
        (always keeping at least the single most likely class), setting the
        rest to ``log(0)``.'''
        _UpperCamelCase , _UpperCamelCase = torch.sort(lowerCAmelCase__ , 1 , descending=lowerCAmelCase__ )
        _UpperCamelCase = torch.exp(lowerCAmelCase__ )
        _UpperCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        _UpperCamelCase = torch.full_like(keep_mask[:, 0:1, :] , lowerCAmelCase__ )
        _UpperCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
        _UpperCamelCase = keep_mask[:, :-1, :]
        # Undo the sort so the mask lines up with the original class order.
        _UpperCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
        _UpperCamelCase = log_p_x_0.clone()
        _UpperCamelCase = -torch.inf # -inf = log(0)
        return rv
| 350 |
'''simple docstring'''
import math
def a__(initial_intensity: float, angle: float) -> float:
    """Return the light intensity transmitted through a polarizer (Malus's law).

    Args:
        initial_intensity: incident intensity; must be non-negative.
        angle: analyzer angle in degrees; must lie in the range 0-360.

    Returns:
        ``initial_intensity * cos^2(angle)``.

    Raises:
        ValueError: if ``initial_intensity`` is negative or ``angle`` is
            outside the allowed 0-360 degree range.
    """
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('The value of intensity cannot be negative')
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError('In Malus Law, the angle is in the range 0-360 degrees')
    # I = I0 * cos^2(theta)
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


# Readable re-export of the physics helper.
malus_law = a__
if __name__ == "__main__":
    # Run any doctests embedded in this module under the name 'malus_law'.
    import doctest
    doctest.testmod(name='malus_law')
| 287 | 0 |
import string
def _UpperCAmelCase(message: str):
    """Brute-force a Caesar cipher, printing the decryption for every key.

    Args:
        message: cipher text; uppercase A-Z characters are shifted back by
            each candidate key, all other characters pass through unchanged.

    Returns:
        The list of candidate plaintexts, indexed by key (0..25). The original
        one-line-per-key CLI output is preserved.
    """
    translations = []
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                # Wrap around the alphabet for negative shifts.
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f'Decryption using Key #{key}: {translated}')
        translations.append(translated)
    return translations


# Re-export under the name used by the interactive entry point below.
decrypt = _UpperCAmelCase
def _UpperCAmelCase():
    """Interactive entry point: read a message and brute-force decrypt it."""
    message = input('Encrypted message: ')
    message = message.upper()
    # NOTE(review): expects a module-level `decrypt` pointing at the Caesar
    # brute-force helper defined above; confirm that name is exported there.
    decrypt(message)


# Re-export under the name called by the __main__ guard below.
main = _UpperCAmelCase
if __name__ == "__main__":
    # Run doctests first, then start the interactive brute-force session.
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not defined under that name in this file as
    # written — both helpers above share one obfuscated name. Confirm the
    # interactive entry point is re-exported as `main`.
    main()
| 62 |
from __future__ import annotations
from collections.abc import Generator
def __SCREAMING_SNAKE_CASE():
    """Infinite incremental prime sieve.

    Yields 2, 3, 5, 7, ... lazily. ``factor_map`` maps each upcoming composite
    to one of its prime factors; a candidate absent from the map is prime.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its recorded factor forward to the
            # next multiple that is not already claimed.
            composite = factor + prime
            while composite in factor_map:
                composite += factor
            factor_map[composite] = factor
        else:
            # `prime` is prime: the first composite it must mark is its square.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


# Re-export under a readable name for callers elsewhere in this file.
sieve = __SCREAMING_SNAKE_CASE
def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE__ = 1E10):
    """Find the least odd index ``n`` with remainder ``2 * p_n * n`` above the limit.

    For odd ``n`` the remainder of ``(p_n - 1)^n + (p_n + 1)^n`` mod ``p_n^2``
    is ``2 * n * p_n``; even ``n`` always give remainder 2 and are skipped.

    Args:
        SCREAMING_SNAKE_CASE__: the limit the remainder must exceed.

    Returns:
        The 1-based index ``n`` of the first prime whose remainder exceeds it.
    """

    def _prime_stream():
        # Local incremental sieve so this function is self-contained.
        factor_map = {}
        candidate = 2
        while True:
            factor = factor_map.pop(candidate, None)
            if factor:
                nxt = factor + candidate
                while nxt in factor_map:
                    nxt += factor
                factor_map[nxt] = factor
            else:
                factor_map[candidate * candidate] = candidate
                yield candidate
            candidate += 1

    primes = _prime_stream()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > SCREAMING_SNAKE_CASE__:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


# Re-export under the name called by the __main__ guard below.
solution = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # NOTE(review): `solution` must be exported by the search routine above.
    # Stray "| 8 | 0 |" table residue was fused onto this line and made the
    # statement a syntax error; it has been removed.
    print(solution())
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __lowerCAmelCase ( nn.Module ):
    """
    A transformer block with self-attention, optional cross-attention, and a
    (chunkable) feed-forward network. ``norm_type`` selects between plain
    LayerNorm, AdaLayerNorm (timestep-conditioned) and AdaLayerNormZero
    (timestep + class-label conditioned with gating).

    NOTE(review): this block appears machine-mangled — the `def` headers repeat
    the parameter name `_a` (a SyntaxError), and the `__a = ...` locals in
    `__init__` look like they should be `self.*` attribute writes: the forward
    path reads `self.use_ada_layer_norm`, `self.norma`, `self.attna`,
    `self.ff`, `self._chunk_size` and `self._chunk_dim`, none of which are set
    here. The two methods below `__init__` also share one mangled name, so the
    `forward` entry point nn.Module requires is missing. Restore the original
    identifiers (norm1/attn1/norm2/attn2/norm3, set_chunk_feed_forward,
    forward) before relying on this code.
    """
    def __init__( self , _a , _a , _a , _a=0.0 , _a = None , _a = "geglu" , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = "layer_norm" , _a = False , ):
        super().__init__()
        __a = only_cross_attention
        __a = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
        __a = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
        # Adaptive norms need the embedding table size to be configured.
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            __a = AdaLayerNorm(_a , _a )
        elif self.use_ada_layer_norm_zero:
            __a = AdaLayerNormZero(_a , _a )
        else:
            __a = nn.LayerNorm(_a , elementwise_affine=_a )
        __a = Attention(
            query_dim=_a , heads=_a , dim_head=_a , dropout=_a , bias=_a , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_a , )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            __a = (
                AdaLayerNorm(_a , _a )
                if self.use_ada_layer_norm
                else nn.LayerNorm(_a , elementwise_affine=_a )
            )
            __a = Attention(
                query_dim=_a , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_a , dim_head=_a , dropout=_a , bias=_a , upcast_attention=_a , ) # is self-attn if encoder_hidden_states is none
        else:
            __a = None
            __a = None
        # 3. Feed-forward
        __a = nn.LayerNorm(_a , elementwise_affine=_a )
        __a = FeedForward(_a , dropout=_a , activation_fn=_a , final_dropout=_a )
        # let chunk size default to None
        __a = None
        __a = 0
    def __UpperCAmelCase ( self , _a , _a ):
        # Sets chunk feed-forward
        __a = chunk_size
        __a = dim
    def __UpperCAmelCase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            __a = self.norma(_a , _a )
        elif self.use_ada_layer_norm_zero:
            __a , __a , __a , __a , __a = self.norma(
                _a , _a , _a , hidden_dtype=hidden_states.dtype )
        else:
            __a = self.norma(_a )
        __a = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        __a = self.attna(
            _a , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_a , **_a , )
        if self.use_ada_layer_norm_zero:
            __a = gate_msa.unsqueeze(1 ) * attn_output
        __a = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attna is not None:
            __a = (
                self.norma(_a , _a ) if self.use_ada_layer_norm else self.norma(_a )
            )
            __a = self.attna(
                _a , encoder_hidden_states=_a , attention_mask=_a , **_a , )
            __a = attn_output + hidden_states
        # 3. Feed-forward
        __a = self.norma(_a )
        if self.use_ada_layer_norm_zero:
            __a = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
            __a = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            __a = torch.cat(
                [self.ff(_a ) for hid_slice in norm_hidden_states.chunk(_a , dim=self._chunk_dim )] , dim=self._chunk_dim , )
        else:
            __a = self.ff(_a )
        if self.use_ada_layer_norm_zero:
            __a = gate_mlp.unsqueeze(1 ) * ff_output
        __a = ff_output + hidden_states
        return hidden_states
class __lowerCAmelCase(nn.Module):
    r"""
    Transformer feed-forward block: an activation/projection module, dropout,
    and an output linear layer, applied in sequence.

    Args:
        dim: input feature size.
        dim_out: output feature size; defaults to ``dim``.
        mult: expansion factor for the inner dimension.
        dropout: dropout probability between projection stages.
        activation_fn: one of "gelu", "gelu-approximate", "geglu",
            "geglu-approximate".
        final_dropout: append a trailing dropout (ViT / MLP-Mixer style).
    """

    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, activation_fn="geglu", final_dropout=False):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        # Select the input-projection/activation module.
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        else:
            # Fail loudly instead of the UnboundLocalError the fall-through produced.
            raise ValueError(f"Unsupported activation_fn: {activation_fn}")
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


# Re-export under the name referenced by the transformer block above.
FeedForward = __lowerCAmelCase
class __lowerCAmelCase(nn.Module):
    r"""
    GELU activation preceded by a linear projection.

    Args:
        dim_in: input feature size of the projection.
        dim_out: output feature size of the projection.
        approximate: forwarded to ``F.gelu`` ("none" or "tanh").
    """

    def __init__(self, dim_in, dim_out, approximate="none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


# Re-export under the name referenced by FeedForward above.
GELU = __lowerCAmelCase
class __lowerCAmelCase(nn.Module):
    r"""
    GEGLU gated activation: a linear projection to ``2 * dim_out`` features,
    split in half along the last dimension, with one half gating the other
    through GELU.
    """

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


# Re-export under the name referenced by FeedForward above.
GEGLU = __lowerCAmelCase
class __lowerCAmelCase(nn.Module):
    r"""
    Sigmoid approximation of GELU (``x * sigmoid(1.702 * x)``) preceded by a
    linear projection.
    """

    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


# Re-export under the name referenced by FeedForward above.
ApproximateGELU = __lowerCAmelCase
class __lowerCAmelCase(nn.Module):
    r"""
    Adaptive LayerNorm: scale and shift are predicted from a timestep
    embedding instead of being learned per-feature.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        # elementwise_affine=False: all modulation comes from `linear`.
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


# Re-export under the name referenced by the transformer block above.
AdaLayerNorm = __lowerCAmelCase
class __lowerCAmelCase(nn.Module):
    r"""
    adaLN-Zero norm: predicts six modulation chunks (shift/scale for the
    attention branch, plus gates and shift/scale for the MLP branch) from a
    combined timestep + class-label embedding.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        # Only the attention-branch shift/scale are applied here; the gates and
        # MLP modulation are returned for the caller to apply.
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


# Re-export under the name referenced by the transformer block above.
AdaLayerNormZero = __lowerCAmelCase
class __lowerCAmelCase(nn.Module):
    r"""
    GroupNorm whose per-channel scale and shift are predicted from an
    embedding, optionally passed through an activation first.
    """

    def __init__(self, embedding_dim, out_dim, num_groups, act_fn=None, eps=1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            # `get_activation` resolves an activation module by name.
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        # Broadcast the per-channel modulation over the spatial dimensions.
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x


# Readable re-export of the adaptive group norm.
AdaGroupNorm = __lowerCAmelCase
| 11 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """
    Scheduler test-suite for UnCLIPScheduler, layered on SchedulerCommonTest.

    NOTE(review): this block appears machine-mangled — every method below
    shares one obfuscated name, so at class-creation time only the last
    definition survives and none of these would run as tests (pytest discovers
    `test_*` names). The class attribute read as `self.scheduler_classes` and
    the locals read as `config`, `scheduler`, `timesteps`, etc. are likewise
    assigned under throwaway names. Restore the original identifiers
    (scheduler_classes, get_scheduler_config, test_timesteps, ...) before
    relying on this suite.
    """
    __UpperCAmelCase : List[str] = (UnCLIPScheduler,)
    def __UpperCAmelCase ( self , **_a ):
        # Builds the default scheduler config, overridden by any kwargs.
        __a = {
            '''num_train_timesteps''': 1_000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }
        config.update(**_a )
        return config
    def __UpperCAmelCase ( self ):
        # Sweep num_train_timesteps values.
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=_a )
    def __UpperCAmelCase ( self ):
        # Sweep both supported variance types.
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=_a )
    def __UpperCAmelCase ( self ):
        # Toggle sample clipping.
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=_a )
    def __UpperCAmelCase ( self ):
        # Sweep the clipping range.
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=_a )
    def __UpperCAmelCase ( self ):
        # Both supported prediction types.
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=_a )
    def __UpperCAmelCase ( self ):
        # Exercise explicit prev_timestep values, skipping invalid orderings.
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=_a , prev_timestep=_a )
    def __UpperCAmelCase ( self ):
        # Reference variance values for the fixed_small_log schedule.
        __a = self.scheduler_classes[0]
        __a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
        __a = scheduler_class(**_a )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
    def __UpperCAmelCase ( self ):
        # Reference variance values for the learned_range schedule.
        __a = self.scheduler_classes[0]
        __a = self.get_scheduler_config(variance_type='''learned_range''' )
        __a = scheduler_class(**_a )
        __a = 0.5
        assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
        assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
        assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
    def __UpperCAmelCase ( self ):
        # Full denoising loop over the training timesteps; checks output stats.
        __a = self.scheduler_classes[0]
        __a = self.get_scheduler_config()
        __a = scheduler_class(**_a )
        __a = scheduler.timesteps
        __a = self.dummy_model()
        __a = self.dummy_sample_deter
        __a = torch.manual_seed(0 )
        for i, t in enumerate(_a ):
            # 1. predict noise residual
            __a = model(_a , _a )
            # 2. predict previous mean of sample x_t-1
            __a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
            __a = pred_prev_sample
        __a = torch.sum(torch.abs(_a ) )
        __a = torch.mean(torch.abs(_a ) )
        assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
        assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
    def __UpperCAmelCase ( self ):
        # Full loop with an inference schedule of 25 steps and explicit
        # prev_timestep passing; checks output stats.
        __a = self.scheduler_classes[0]
        __a = self.get_scheduler_config()
        __a = scheduler_class(**_a )
        scheduler.set_timesteps(25 )
        __a = scheduler.timesteps
        __a = self.dummy_model()
        __a = self.dummy_sample_deter
        __a = torch.manual_seed(0 )
        for i, t in enumerate(_a ):
            # 1. predict noise residual
            __a = model(_a , _a )
            if i + 1 == timesteps.shape[0]:
                __a = None
            else:
                __a = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            __a = scheduler.step(
                _a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
            __a = pred_prev_sample
        __a = torch.sum(torch.abs(_a ) )
        __a = torch.mean(torch.abs(_a ) )
        assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
        assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
    def __UpperCAmelCase ( self ):
        # Intentionally skipped in the original suite.
        pass
    def __UpperCAmelCase ( self ):
        # Intentionally skipped in the original suite.
        pass
| 11 | 1 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def SCREAMING_SNAKE_CASE(t5x_checkpoint_path):
    """Load a T5X/Flax checkpoint and return its parameters as a flat dict.

    Args:
        t5x_checkpoint_path: path to the T5X checkpoint directory/file.

    Returns:
        The checkpoint's parameter tree flattened to ``{path_tuple: array}``.
    """
    flax_params = checkpoints.load_tax_checkpoint(t5x_checkpoint_path)
    # Flatten the nested parameter tree; the original code flattened the path
    # argument itself and returned an undefined name.
    flax_params = flatten_dict(flax_params)
    return flax_params


# Readable re-export of the checkpoint loader.
get_flax_param = SCREAMING_SNAKE_CASE
def rename_and_convert_flax_params(flax_dict):
    """Rename flattened T5X parameter keys to the HF Pix2Struct naming scheme and
    convert the arrays to torch tensors.

    Args:
        flax_dict: flat dict whose keys are tuples of path components (the first
            component is expected to be "target" for trainable parameters).

    Returns:
        dict mapping dotted HF parameter names to `torch.Tensor`s. Weight matrices
        are transposed (flax stores kernels as (in, out)); embedding tables are not.
    """
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix ("target") from the tuple key
            new_key = ".".join(key[1:])

            # apply the generic renames, then decoder-specific ones
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # encoder layers: layers_<i> -> layer.<i>, and nest under encoder.encoder
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format; embeddings keep their layout,
    # everything else is a kernel and must be transposed
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Convert a T5X Pix2Struct checkpoint to an HF model + processor and save both
    to `pytorch_dump_folder_path`.

    Restored from the obfuscated original, which gave all four parameters the same
    name (a SyntaxError) while the body read `use_large` and the `__main__` block
    called this function by this name.

    Args:
        t5x_checkpoint_path: path to the original T5X checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        use_large: build the "large" config variant instead of the base one.
        is_vqa: whether the converted model config is the VQA variant.
    """
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)

    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )
    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
    # NOTE(review): mirrors the upstream conversion script, which sets this flag
    # unconditionally — confirm whether it should depend on `is_vqa`.
    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    # CLI entry point: mirrors the argparse flags below; `parser`/`args` were
    # unbound in the obfuscated original, and the parse result was read through a
    # non-existent `tax_checkpoint_path` attribute (the flag is --t5x_checkpoint_path).
    parser = argparse.ArgumentParser()
    parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
    parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 50 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase : Union[str, Any] = ["text", "image", "audio"]
def create_inputs(input_types):
    """Build one dummy input per requested modality.

    Args:
        input_types: list whose items are "text", "image", "audio", or a nested
            list of those (handled recursively).

    Returns:
        list of dummy inputs (str / PIL.Image / torch.Tensor / nested list).

    Raises:
        ValueError: for any unrecognized entry.

    Fixes over the obfuscated original: the parameter is bound to the name the body
    reads (`input_types`), the nested-list branch tests `isinstance(input_type, list)`
    instead of the nonsensical `isinstance(x, x)`, and recursion descends into the
    nested entry rather than the whole argument (which looped forever).
    """
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs
def output_types(outputs):
    """Map each produced output object to its modality name.

    Args:
        outputs: list of tool outputs (str/AgentText, PIL image/AgentImage,
            torch.Tensor/AgentAudio).

    Returns:
        list of "text" / "image" / "audio" strings, one per output.

    Raises:
        ValueError: for any object of an unrecognized type.

    Fix over the obfuscated original: the parameter is bound to the name the body
    iterates (`outputs`), and the def carries the name the test mixin calls.
    """
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class __SCREAMING_SNAKE_CASE:
    """Mixin of sanity tests for a Tool exposed as `self.tool` on the subclass.

    NOTE(review): method names restored from the upstream tool-test mixin — the
    obfuscated original gave all five methods the same name, so only the last one
    survived and the rest never ran; locals were also unbound.
    """

    def test_inputs_outputs(self):
        # Declared input/output modalities must come from the authorized list.
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        # Calling the tool with dummy inputs must yield outputs of the declared types.
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        # Each output must be an instance of the agent type mapped to its modality.
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        # Wrapping inputs in their agent types must not break the call.
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 238 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
UpperCAmelCase_ : Optional[Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    """ResNet-backed image encoder producing `num_image_embeds` 2048-d embeddings.

    NOTE(review): restored from the obfuscated original, which referenced the
    non-existent `torchvision.models.resnetaaa` / `nn.AdaptiveAvgPoolad` (mangled
    resnet152 / AdaptiveAvgPool2d), named its forward pass `__A` (so nn.Module's
    __call__ could never dispatch to it), and left `args` unbound.
    """

    def __init__(self, args):
        super().__init__()
        # Drop the classifier head and final avgpool; keep the conv trunk.
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        # Pool the 7x7 feature map down to the configured embedding grid.
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    """Dataset over a JSON-lines file with "text", "label" (list of label names)
    and "img" (path relative to the data file) fields.

    NOTE(review): restored from the obfuscated original, whose __init__ assigned
    every value to a throwaway local instead of the `self.*` attributes the other
    methods read, and whose parameters/base class were unbound. The multi-hot
    label indexing in __getitem__ is reconstructed from the upstream MM-IMDB
    utilities — confirm against callers.
    """

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        # split off the special start/end tokens, then truncate the middle
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        # multi-hot target over the label vocabulary
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        """Count how often each label name occurs across the whole dataset."""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    """Pad a batch of JsonlDataset items into fixed tensors.

    Args:
        batch: list of dicts with "sentence" (1-D LongTensor), "image", "label",
            "image_start_token", "image_end_token".

    Returns:
        (text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token,
        tgt_tensor) — text/mask are (batch, max_len) LongTensors, zero-padded on
        the right, with mask 1 over real tokens.

    Fix over the obfuscated original: the per-row slice assignments into
    `text_tensor`/`mask_tensor` had been replaced by assignments to a throwaway
    local, so both tensors were returned all-zero.
    """
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    """Return the fixed list of 23 MM-IMDB genre labels, in canonical order.

    NOTE(review): renamed from the obfuscated `UpperCamelCase`, which collided with
    every other definition in this module.
    """
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]
def get_image_transforms():
    """Return the standard eval-time image pipeline: resize to 256, center-crop to
    224, convert to tensor, and normalize with the MM-IMDB channel statistics.

    NOTE(review): renamed from the obfuscated `UpperCamelCase`, which collided with
    every other definition in this module.
    """
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
| 198 |
from manim import *
class UpperCamelCase ( _UpperCAmelCase ):
    # NOTE(review): manim animation scene (checkpoint-loading illustration). The
    # obfuscation destroyed the local-variable names: every assignment target is
    # `A__` while later statements read names such as `cpu`, `gpu`, `model`,
    # `cpu_targs`, `fill`, `key_text`, `cpu_left_col_base` that are never bound,
    # and `mem`/`_UpperCAmelCase` come from nowhere visible. The code is kept
    # byte-identical (indentation restored) — flag for reconstruction against the
    # upstream animation script rather than guessing each binding here.
    def __A ( self ):
        # Build the CPU memory block (two columns of cells) and label it.
        A__ = Rectangle(height=0.5 , width=0.5 )
        A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        A__ = [mem.copy() for i in range(6 )]
        A__ = [mem.copy() for i in range(6 )]
        A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
        A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
        A__ = VGroup(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
        A__ = Text("CPU" , font_size=24 )
        A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(UpperCAmelCase__ )
        # Build the (smaller) GPU block.
        A__ = [mem.copy() for i in range(4 )]
        A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
        A__ = Text("GPU" , font_size=24 )
        A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
        gpu.move_to([-1, -1, 0] )
        self.add(UpperCAmelCase__ )
        # Build the model block.
        A__ = [mem.copy() for i in range(6 )]
        A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
        A__ = Text("Model" , font_size=24 )
        A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0.5 , aligned_edge=UpperCAmelCase__ )
        model.move_to([3, -1.0, 0] )
        self.add(UpperCAmelCase__ )
        # Place a small "empty weight" marker next to each model cell.
        A__ = []
        for i, rect in enumerate(UpperCAmelCase__ ):
            rect.set_stroke(UpperCAmelCase__ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            A__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase__ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCAmelCase__ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=UpperCAmelCase__ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=UpperCAmelCase__ , buff=0.0 )
            self.add(UpperCAmelCase__ )
            cpu_targs.append(UpperCAmelCase__ )
        # Build the loaded-checkpoint block and the legend key.
        A__ = [mem.copy() for i in range(6 )]
        A__ = VGroup(*UpperCAmelCase__ ).arrange(UpperCAmelCase__ , buff=0 )
        A__ = Text("Loaded Checkpoint" , font_size=24 )
        A__ = Group(UpperCAmelCase__ , UpperCAmelCase__ ).arrange(UpperCAmelCase__ , aligned_edge=UpperCAmelCase__ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        A__ = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        A__ = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(UpperCAmelCase__ , UpperCAmelCase__ )
        A__ = MarkupText(
            F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(UpperCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        A__ = MarkupText(
            F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCAmelCase__ ) , Write(UpperCAmelCase__ ) )
        self.play(Write(UpperCAmelCase__ , run_time=1 ) , Create(UpperCAmelCase__ , run_time=1 ) )
        # Animate checkpoint cells filling and moving into the CPU columns.
        A__ = []
        A__ = []
        for i, rect in enumerate(UpperCAmelCase__ ):
            A__ = fill.copy().set_fill(UpperCAmelCase__ , opacity=0.7 )
            target.move_to(UpperCAmelCase__ )
            first_animations.append(GrowFromCenter(UpperCAmelCase__ , run_time=1 ) )
            A__ = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(UpperCAmelCase__ , run_time=1.5 ) )
        self.play(*UpperCAmelCase__ )
        self.play(*UpperCAmelCase__ )
        self.wait()
| 198 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A(ProcessorMixin):
    r"""Processor wrapping a BridgeTower image processor and a RoBERTa tokenizer.

    Tokenizes the text, computes pixel values (+ pixel mask) for the images, and
    merges both into a single `BatchEncoding`.

    NOTE(review): restored from the obfuscated original — the base class `a` and
    the three class attributes all had unbound/colliding names, and `__call__`
    declared every parameter with the same name (a SyntaxError). Parameter names
    and defaults follow the standard tokenizer-call signature implied by the body.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize `text` and preprocess `images`, returning one merged encoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # union of tokenizer and image-processor input names, order-preserving
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 71 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __lowerCamelCase(ProcessorMixin):
    r"""Processor wrapping an OWL-ViT image processor and a CLIP tokenizer.

    Supports plain text queries, nested per-image text queries, query images
    (image-guided detection), and target images, returning a `BatchEncoding` with
    the corresponding `input_ids`/`attention_mask`, `query_pixel_values` and/or
    `pixel_values`.

    NOTE(review): restored from the obfuscated original — base class, the three
    class attributes, every method name except `__call__`, and all parameter
    names were mangled (several signatures repeated one parameter name, a
    SyntaxError). Names follow the standard processor conventions implied by the
    surviving body.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # accept the deprecated keyword as a fallback
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Prepare text and/or image inputs for the model.

        Raises:
            ValueError: when all of `text`, `query_images` and `images` are None,
                or `return_tensors` names an unavailable backend.
            TypeError: when `text` is neither a string, a list of strings, nor a
                nested list of strings.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, list) and not isinstance(text[0], list)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, list) and isinstance(text[0], list):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 310 | 0 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_a = object()
# For specifying empty leaf dict `{}`
_a = object()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ):
UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )]
if matches and all(__lowerCamelCase ):
return True
return False
def __a ( __lowerCamelCase ):
def replace(__lowerCamelCase, __lowerCamelCase ):
for rule, replacement in rules:
if _match(__lowerCamelCase, __lowerCamelCase ):
return replacement
return val
return replace
def _get_partition_rules():
    """Return the (key-pattern, PartitionSpec) table for a GPT-2-style model.

    `None` means "replicate" for that parameter (or axis). Renamed from the
    obfuscated `__a` — `set_partitions` below calls it by this name; the mangled
    `__lowerCamelCase` placeholders are restored to `None` per the standard
    model-parallel partition tables.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def __a(in_dict):
    """Assign a PartitionSpec to every parameter in `in_dict` (a nested param
    tree) and return the result as a frozen nested dict.

    Every leaf starts as the `_unmatched` sentinel and must be claimed by some
    rule from `_get_partition_rules` (replicated leaves get `None`); the assert
    guards against an incomplete rule table. Locals restored from the obfuscated
    original, which bound them to throwaway names while reading `initd`/`result`.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 23 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level names restored: the tokenizer class below reads `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`, but the obfuscated original bound all
# four values to the same throwaway name `_a`.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class A_(PreTrainedTokenizer):
    """Character-level MGP-STR tokenizer backed by a JSON vocab file.

    NOTE(review): restored from the obfuscated original — the base class and the
    three class attributes were unbound/colliding, and all six methods shared one
    name (so only the last survived). Method names follow the standard
    `PreTrainedTokenizer` override contract the bodies clearly implement.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # reverse mapping: id -> token
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level tokenization: one token per character
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocab JSON into `save_directory`; returns the written path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 23 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class snake_case__:
    """Base streamer interface: `.generate()` pushes token tensors via `put` and
    signals completion via `end`.

    NOTE(review): method names restored to the standard streamer protocol — the
    obfuscated original gave both methods the same name, and the subclasses below
    override exactly this put/end/on_finalized_text protocol.
    """

    def put(self, value):
        """Receive a new batch of generated token ids."""
        raise NotImplementedError()

    def end(self):
        """Flush any buffered state at the end of generation."""
        raise NotImplementedError()
class snake_case__(snake_case__):  # subclasses the streamer base defined just above
    """Streamer that decodes incoming token ids and prints text to stdout as soon
    as it forms complete words/lines.

    NOTE(review): restored from the obfuscated original, whose four methods all
    shared one name while the body called `self.on_finalized_text` and
    `self._is_chinese_char`; the base-class reference was also unbound (here it
    resolves to the previous binding of `snake_case__`, i.e. the base streamer).
    """

    def __init__(self, tokenizer, skip_prompt=False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receive new token ids, decode the cache and emit any printable prefix."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            # first call carries the prompt tokens: swallow them
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flush whatever remains in the cache and mark the stream finished."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text, stream_end=False):
        """Print finalized text; subclasses override this to redirect the stream."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Return True if code point `cp` falls in a CJK Unicode block."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False
class snake_case__(snake_case__):  # subclasses the text streamer defined just above
    """Streamer that pushes finalized text onto a queue and exposes it as an
    iterator (raising StopIteration when the stop signal arrives).

    NOTE(review): restored from the obfuscated original — `on_finalized_text` and
    `__next__` had been renamed to a shared dummy name (so iteration could never
    work), and the base-class reference was unbound (here it resolves to the
    previous binding of `snake_case__`, i.e. the printing streamer).
    """

    def __init__(self, tokenizer, skip_prompt=False, timeout=None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None  # sentinel placed on the queue when the stream ends
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False):
        """Put the new text in the queue; on stream end, also put the stop signal."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 107 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def strictly_diagonally_dominant( table : NDArray[floataa] ) -> bool:
    """Raise ValueError unless each diagonal entry exceeds the sum of the other
    coefficients in its row (the last, constant column is ignored).

    Provided in-block because the solver below calls this name, which the
    surrounding file no longer defines.
    """
    rows, cols = table.shape
    for i in range(rows ):
        total = sum(table[i][j] for j in range(cols - 1 ) if j != i )
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return True


def __magic_name__ ( coefficient_matrix : NDArray[floataa], constant_matrix : NDArray[floataa], init_val : list[float], iterations : int, ) -> list[float]:
    """Approximate the solution of ``Ax = b`` with the Jacobi iteration method.

    Args:
        coefficient_matrix: square ``n x n`` matrix ``A``.
        constant_matrix: ``n x 1`` column vector ``b``.
        init_val: initial guess for the ``n`` unknowns.
        iterations: number of Jacobi sweeps to run (must be >= 1).

    Returns:
        The approximation after ``iterations`` sweeps, as a list of floats.

    Raises:
        ValueError: on any dimension mismatch, non-positive iteration count,
            or a coefficient matrix that is not strictly diagonally dominant.

    NOTE(review): variable names restored — the original bound every value to
    one local ``a`` (and duplicated parameter names, a SyntaxError) while the
    body read the intended names.
    """
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg )
    if cols2 != 1:
        msg = f"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"""matrix but received {len(init_val )} and {rows1}"""
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    # Augmented matrix [A | b].
    table = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]          # diagonal coefficient
                elif col == cols - 1:
                    val = table[row][col]            # constant term b[row]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def __magic_name__ ( A : NDArray[floataa] ) -> bool:
    """Return True if the augmented matrix ``A`` is strictly diagonally dominant.

    The last column (the constant vector) is excluded from the row sums.
    Raises ValueError when dominance does not hold.

    NOTE(review): locals restored — the original bound every value to ``a``
    while the body read ``table``/``rows``/``cols``/``total``.
    """
    table = A
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows ):
        total = 0
        for j in range(0, cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
    import doctest

    # Run any doctests defined in this module when executed as a script.
    doctest.testmod()
| 107 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
# Shared module-level RNG. The helper below falls back to ``global_rng``;
# expose the RNG under that name too (an obfuscated rename broke the lookup).
SCREAMING_SNAKE_CASE : random.Random = random.Random()
global_rng = SCREAMING_SNAKE_CASE
def UpperCamelCase ( shape , scale=1.0 , rng=None , name=None ) -> list:
    """Create a nested list of random floats in ``[0, scale)`` of 2-D ``shape``.

    Args:
        shape: pair ``(rows, cols)`` of the batch to generate.
        scale: multiplier applied to each uniform sample.
        rng: optional ``random.Random``; falls back to the module ``global_rng``.
        name: accepted but unused — kept for call-site compatibility.

    NOTE(review): parameter names restored from the body (the original def
    repeated ``_a`` four times, a SyntaxError).
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class UpperCamelCase ( unittest.TestCase ):
    """Config holder building feature-extractor kwargs and synthetic speech inputs.

    NOTE(review): the original ``__init__`` repeated a parameter name (a
    SyntaxError) and bound every value to a throwaway local ``lowercase_``;
    parameter names and ``self`` attributes restored from what the methods
    read. The dict-builder is renamed ``prepare_feat_extract_dict`` because
    the test class below calls it under that name.
    """

    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=24 , num_mel_bins=24 , padding_value=0.0 , sampling_rate=1_6000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive sequence lengths so inputs strictly grow.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict( self ):
        """Return the kwargs used to construct the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def UpperCamelCase ( self , equal_length=False , numpify=False ):
        """Build a batch of synthetic speech inputs (lists, or numpy if ``numpify``)."""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            # NOTE(review): ``floats_list`` is the module helper (renamed by
            # obfuscation to ``UpperCamelCase`` above) — verify resolution.
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class UpperCamelCase ( lowercase__ , unittest.TestCase ):
    """Feature-extraction tests for Speech2Text.

    NOTE(review): obfuscation damage left intact — the base class
    ``lowercase__`` is undefined in this module, every method shares the
    name ``UpperCamelCase`` (so only the last definition survives on the
    class), ``SpeechaTextFeatureExtractionTester`` is undefined, and the
    ``lowercase_`` assignments never bind the names later lines read
    (``feature_extractor``, ``speech_inputs``, ``inputs``, ...), so the
    bodies would raise NameError if executed. Comments only; code unchanged.
    """
    lowercase : int =SpeechaTextFeatureExtractor if is_speech_available() else None

    # setUp: builds the config helper used by every test method.
    def UpperCamelCase ( self ):
        lowercase_ :str = SpeechaTextFeatureExtractionTester(self )

    # _check_zero_mean_unit_variance: per-feature normalization assertion.
    def UpperCamelCase ( self , UpperCamelCase_ ):
        self.assertTrue(np.all(np.mean(UpperCamelCase_ , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(UpperCamelCase_ , axis=0 ) - 1 ) < 1E-3 ) )

    # test_call: list vs numpy inputs, batched vs unbatched equivalence.
    def UpperCamelCase ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        lowercase_ :Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        lowercase_ :Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ :Dict = [np.asarray(UpperCamelCase_ ) for speech_input in speech_inputs]
        # Test feature size
        lowercase_ :Optional[int] = feature_extractor(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''np''' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
        # Test not batched input
        lowercase_ :Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
        lowercase_ :Tuple = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
        self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
        # Test batched
        lowercase_ :Union[str, Any] = feature_extractor(UpperCamelCase_ , return_tensors='''np''' ).input_features
        lowercase_ :List[str] = feature_extractor(UpperCamelCase_ , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
            self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        lowercase_ :List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        lowercase_ :Dict = np.asarray(UpperCamelCase_ )
        lowercase_ :str = feature_extractor(UpperCamelCase_ , return_tensors='''np''' ).input_features
        lowercase_ :Any = feature_extractor(UpperCamelCase_ , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
            self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )

    # Normalization test across padding strategies (attention-mask variant).
    def UpperCamelCase ( self ):
        lowercase_ :List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ :str = ['longest', 'max_length', 'do_not_pad']
        lowercase_ :List[Any] = [None, 16, None]
        for max_length, padding in zip(UpperCamelCase_ , UpperCamelCase_ ):
            lowercase_ :List[Any] = feature_extractor(
                UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ )
            lowercase_ :str = inputs.input_features
            lowercase_ :Tuple = inputs.attention_mask
            lowercase_ :Dict = [np.sum(UpperCamelCase_ ) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )

    # Same as above but with numpy tensors; also checks padded tail is ~zero.
    def UpperCamelCase ( self ):
        lowercase_ :Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ :List[str] = ['longest', 'max_length', 'do_not_pad']
        lowercase_ :Optional[Any] = [None, 16, None]
        for max_length, padding in zip(UpperCamelCase_ , UpperCamelCase_ ):
            lowercase_ :Tuple = feature_extractor(
                UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''np''' , return_attention_mask=UpperCamelCase_ )
            lowercase_ :List[str] = inputs.input_features
            lowercase_ :Optional[Any] = inputs.attention_mask
            lowercase_ :str = [np.sum(UpperCamelCase_ ) for x in attention_mask]
            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )

    # Truncation with padding='max_length'.
    def UpperCamelCase ( self ):
        lowercase_ :Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ :Union[str, Any] = feature_extractor(
            UpperCamelCase_ , padding='''max_length''' , max_length=4 , truncation=UpperCamelCase_ , return_tensors='''np''' , return_attention_mask=UpperCamelCase_ , )
        lowercase_ :Dict = inputs.input_features
        lowercase_ :List[Any] = inputs.attention_mask
        lowercase_ :Optional[int] = np.sum(attention_mask == 1 , axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1] )
        self._check_zero_mean_unit_variance(input_features[2] )

    # Truncation with padding='longest'; also verifies output shapes.
    def UpperCamelCase ( self ):
        lowercase_ :Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ :Optional[int] = feature_extractor(
            UpperCamelCase_ , padding='''longest''' , max_length=4 , truncation=UpperCamelCase_ , return_tensors='''np''' , return_attention_mask=UpperCamelCase_ , )
        lowercase_ :Tuple = inputs.input_features
        lowercase_ :Optional[int] = inputs.attention_mask
        lowercase_ :List[Any] = np.sum(attention_mask == 1 , axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape , (3, 4, 24) )
        lowercase_ :Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ :Tuple = feature_extractor(
            UpperCamelCase_ , padding='''longest''' , max_length=16 , truncation=UpperCamelCase_ , return_tensors='''np''' , return_attention_mask=UpperCamelCase_ , )
        lowercase_ :List[str] = inputs.input_features
        lowercase_ :Tuple = inputs.attention_mask
        lowercase_ :Optional[Any] = np.sum(attention_mask == 1 , axis=1 )
        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
        self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape , (3, 6, 24) )

    # Double-precision inputs are down-cast to float32 by pad().
    def UpperCamelCase ( self ):
        import torch
        lowercase_ :str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :Dict = np.random.rand(100 , 32 ).astype(np.floataa )
        lowercase_ :Optional[int] = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            lowercase_ :Union[str, Any] = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            lowercase_ :Any = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )

    # _load_datasamples: fetch `num_samples` clips from the dummy ASR dataset.
    def UpperCamelCase ( self , UpperCamelCase_ ):
        from datasets import load_dataset
        lowercase_ :Union[str, Any] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        lowercase_ :Union[str, Any] = ds.sort('''id''' ).select(range(UpperCamelCase_ ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]

    # Integration test: compare extracted features against golden values.
    def UpperCamelCase ( self ):
        # fmt: off
        lowercase_ :Tuple = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ] )
        # fmt: on
        lowercase_ :List[Any] = self._load_datasamples(1 )
        lowercase_ :Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :List[Any] = feature_extractor(UpperCamelCase_ , return_tensors='''pt''' ).input_features
        self.assertEquals(input_features.shape , (1, 584, 24) )
        self.assertTrue(np.allclose(input_features[0, 0, :30] , UpperCamelCase_ , atol=1E-4 ) )
| 362 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Shared module-level RNG. The helper below falls back to ``global_rng``;
# expose the RNG under that name too (an obfuscated rename broke the lookup).
SCREAMING_SNAKE_CASE : random.Random = random.Random()
global_rng = SCREAMING_SNAKE_CASE
def UpperCamelCase ( shape , scale=1.0 , rng=None , name=None ) -> list:
    """Create a nested list of random floats in ``[0, scale)`` of 2-D ``shape``.

    Args:
        shape: pair ``(rows, cols)`` of the batch to generate.
        scale: multiplier applied to each uniform sample.
        rng: optional ``random.Random``; falls back to the module ``global_rng``.
        name: accepted but unused — kept for call-site compatibility.

    NOTE(review): parameter names restored from the body (the original def
    repeated ``_a`` four times, a SyntaxError).
    """
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class UpperCamelCase ( unittest.TestCase ):
    """Config holder building Wav2Vec2 feature-extractor kwargs and synthetic inputs.

    NOTE(review): the original ``__init__`` repeated a parameter name (a
    SyntaxError) and bound every value to a throwaway local ``lowercase_``;
    parameter names and ``self`` attributes restored from what the methods
    read. The dict-builder is renamed ``prepare_feat_extract_dict`` because
    the test class below calls it under that name.
    """

    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=1_6000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive sequence lengths so inputs strictly grow.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict( self ):
        """Return the kwargs used to construct the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def UpperCamelCase ( self , equal_length=False , numpify=False ):
        """Build a batch of synthetic 1-D speech inputs (lists, or numpy if ``numpify``)."""
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            # NOTE(review): ``floats_list`` is the module helper (renamed by
            # obfuscation to ``UpperCamelCase`` above) — verify resolution.
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
class UpperCamelCase ( lowercase__ , unittest.TestCase ):
    """Feature-extraction tests for Wav2Vec2.

    NOTE(review): obfuscation damage left intact — the base class
    ``lowercase__`` is undefined in this module, every method shares the
    name ``UpperCamelCase`` (only the last definition survives),
    ``WavaVecaFeatureExtractionTester`` is undefined, and the ``lowercase_``
    assignments never bind the names later lines read (``feat_extract``,
    ``speech_inputs``, ``processed``, ...), so the bodies would raise
    NameError if executed. Comments only; code unchanged.
    """
    lowercase : int =WavaVecaFeatureExtractor

    # setUp: builds the config helper used by every test method.
    def UpperCamelCase ( self ):
        lowercase_ :Tuple = WavaVecaFeatureExtractionTester(self )

    # _check_zero_mean_unit_variance: per-sample normalization assertion.
    def UpperCamelCase ( self , UpperCamelCase_ ):
        self.assertTrue(np.all(np.mean(UpperCamelCase_ , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(UpperCamelCase_ , axis=0 ) - 1 ) < 1E-3 ) )

    # test_call: list vs numpy inputs, batched vs unbatched equivalence.
    def UpperCamelCase ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        lowercase_ :int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        lowercase_ :Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ :Any = [np.asarray(UpperCamelCase_ ) for speech_input in speech_inputs]
        # Test not batched input
        lowercase_ :Any = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        lowercase_ :List[str] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
        # Test batched
        lowercase_ :Optional[Any] = feat_extract(UpperCamelCase_ , return_tensors='''np''' ).input_values
        lowercase_ :Tuple = feat_extract(UpperCamelCase_ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
            self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        lowercase_ :Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        lowercase_ :int = np.asarray(UpperCamelCase_ )
        lowercase_ :List[str] = feat_extract(UpperCamelCase_ , return_tensors='''np''' ).input_values
        lowercase_ :Optional[Any] = feat_extract(UpperCamelCase_ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ , UpperCamelCase_ ):
            self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )

    # Normalization across padding strategies; padded tail must be ~zero.
    def UpperCamelCase ( self ):
        lowercase_ :Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ :Any = ['''longest''', '''max_length''', '''do_not_pad''']
        lowercase_ :int = [None, 1600, None]
        for max_length, padding in zip(UpperCamelCase_ , UpperCamelCase_ ):
            lowercase_ :Optional[Any] = feat_extract(UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors='''np''' )
            lowercase_ :Tuple = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    # Same check driven by an explicit lengths range, without return_tensors.
    def UpperCamelCase ( self ):
        lowercase_ :List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :Union[str, Any] = range(800 , 1400 , 200 )
        lowercase_ :Optional[int] = [floats_list((1, x) )[0] for x in lengths]
        lowercase_ :Any = ['''longest''', '''max_length''', '''do_not_pad''']
        lowercase_ :Optional[Any] = [None, 1600, None]
        for max_length, padding in zip(UpperCamelCase_ , UpperCamelCase_ ):
            lowercase_ :Any = feat_extract(UpperCamelCase_ , max_length=UpperCamelCase_ , padding=UpperCamelCase_ )
            lowercase_ :Any = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )

    # Truncation with padding='max_length'.
    def UpperCamelCase ( self ):
        lowercase_ :Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ :str = feat_extract(
            UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=1000 , padding='''max_length''' , return_tensors='''np''' )
        lowercase_ :Any = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )

    # Truncation with padding='longest'; verifies resulting shapes.
    def UpperCamelCase ( self ):
        lowercase_ :Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ :Optional[int] = feat_extract(
            UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=1000 , padding='''longest''' , return_tensors='''np''' )
        lowercase_ :Tuple = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )
        lowercase_ :Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        lowercase_ :Optional[int] = feat_extract(
            UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=2000 , padding='''longest''' , return_tensors='''np''' )
        lowercase_ :int = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )

    # Double-precision inputs are down-cast to float32 by pad().
    @require_torch
    def UpperCamelCase ( self ):
        import torch
        lowercase_ :Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowercase_ :Any = np.random.rand(100 ).astype(np.floataa )
        lowercase_ :Dict = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            lowercase_ :List[Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            lowercase_ :Dict = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    # Pretrained configs: only layer-norm extractors should return attention masks.
    @slow
    @require_torch
    def UpperCamelCase ( self ):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            lowercase_ :List[Any] = WavaVecaConfig.from_pretrained(UpperCamelCase_ )
            lowercase_ :Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ )
            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
| 252 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import table for the MobileViT subpackage.
# NOTE(review): the original bound the table and every later entry to one
# scalar name, so each assignment overwrote the last and the
# ``_import_structure`` passed to ``_LazyModule`` below was undefined; the
# dict-building pattern is restored here.
_import_structure = {
    'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mobilevit'] = [
        'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileViTForImageClassification',
        'MobileViTForSemanticSegmentation',
        'MobileViTModel',
        'MobileViTPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
        'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFMobileViTForImageClassification',
        'TFMobileViTForSemanticSegmentation',
        'TFMobileViTModel',
        'TFMobileViTPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrored from the table above.
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): upstream assigns the lazy module to sys.modules[__name__];
    # the original binding to a plain name is preserved here to avoid changing
    # module-level side effects — confirm which form is intended.
    snake_case_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 24 |
import socket
def lowerCamelCase__ ( ) -> None:
    """Connect to a local file server and save the received stream to ``Received_file``.

    NOTE(review): locals restored — the original bound every value to
    ``__snake_case`` while later lines read ``sock``/``host``/``port``/``data``.
    """
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2312
    sock.connect((host, port) )
    sock.send(B'''Hello server!''' )
    with open('''Received_file''' , '''wb''' ) as out_file:
        print('''File opened''' )
        print('''Receiving data...''' )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            # Write the chunk just received (the original wrote an unbound name).
            out_file.write(data )
    print('''Successfully received the file''' )
    sock.close()
    print('''Connection closed''' )
if __name__ == "__main__":
main()
| 24 | 1 |
import json
import os
import torch
from diffusers import UNetaDModel
# Create the output directories up front so the torch.save / json.dump calls
# in the converters below never fail on a missing path.
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def UpperCamelCase__( hor )->None:
    """Convert a temporal-UNet checkpoint (horizon 32 or 128) to diffusers format.

    NOTE(review): locals restored from the names the body reads (the original
    bound everything to ``A__``); the parameter is named ``hor`` to match the
    f-strings below. Horizons other than 32/128 leave the block-type locals
    unbound and will raise — unchanged from the original's reachable behavior.
    """
    if hor == 1_28:
        down_block_types = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
        block_out_channels = (32, 1_28, 2_56)
        up_block_types = ('''UpResnetBlock1D''', '''UpResnetBlock1D''')
    elif hor == 32:
        down_block_types = ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''')
        block_out_channels = (32, 64, 1_28, 2_56)
        up_block_types = ('''UpResnetBlock1D''', '''UpResnetBlock1D''', '''UpResnetBlock1D''')
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
    state_dict = model.state_dict()
    config = {
        '''down_block_types''': down_block_types,
        '''block_out_channels''': block_out_channels,
        '''up_block_types''': up_block_types,
        '''layers_per_block''': 1,
        '''use_timestep_embedding''': True,
        '''out_block_type''': '''OutConv1DBlock''',
        '''norm_num_groups''': 8,
        '''downsample_each_block''': False,
        '''in_channels''': 14,
        '''out_channels''': 14,
        '''extra_in_channels''': 0,
        '''time_embedding_type''': '''positional''',
        '''flip_sin_to_cos''': False,
        '''freq_shift''': 1,
        '''sample_size''': 6_55_36,
        '''mid_block_type''': '''MidResTemporalBlock1D''',
        '''act_fn''': '''mish''',
    }
    hf_value_function = UNetaDModel(**config )
    print(f"length of state dict: {len(state_dict.keys() )}" )
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        # Re-key the source weights to the diffusers parameter names.
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json" , '''w''' ) as f:
        json.dump(config , f )
def UpperCamelCase__( )->None:
    """Convert the hopper-medium-v2 value-function checkpoint to diffusers format.

    NOTE(review): locals restored from the names the body reads (the original
    bound everything to ``A__``).
    """
    config = {
        '''in_channels''': 14,
        '''down_block_types''': ('''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D''', '''DownResnetBlock1D'''),
        '''up_block_types''': (),
        '''out_block_type''': '''ValueFunction''',
        '''mid_block_type''': '''ValueFunctionMidBlock1D''',
        '''block_out_channels''': (32, 64, 1_28, 2_56),
        '''layers_per_block''': 1,
        '''downsample_each_block''': True,
        '''sample_size''': 6_55_36,
        '''out_channels''': 14,
        '''extra_in_channels''': 0,
        '''time_embedding_type''': '''positional''',
        '''use_timestep_embedding''': True,
        '''flip_sin_to_cos''': False,
        '''freq_shift''': 1,
        '''norm_num_groups''': 8,
        '''act_fn''': '''mish''',
    }
    model = torch.load('''/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch''' )
    # The checkpoint file already stores a plain state dict.
    state_dict = model
    hf_value_function = UNetaDModel(**config )
    print(f"length of state dict: {len(state_dict.keys() )}" )
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        # Re-key the source weights to the diffusers parameter names.
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , '''hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin''' )
    with open('''hub/hopper-medium-v2/value_function/config.json''' , '''w''' ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 39 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
    """Agent tool producing a binary segmentation mask of an image for a text label.

    NOTE(review): the obfuscated original bound all six class attributes to a
    single name and all three pipeline hooks to a single method name, so only
    the last of each survived; attribute/method names are restored to the
    PipelineTool contract (description / default_checkpoint / name /
    model_class / inputs / outputs and encode / forward / decode) inferred
    from the ``.base import PipelineTool`` usage — confirm against the
    framework. ``__init__`` also repeated its ``*``/``**`` parameter name
    (a SyntaxError); restored to ``*args, **kwargs``.
    """

    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation
    inputs = ['''image''', '''text''']
    outputs = ['''image''']

    def __init__( self , *args , **kwargs ):
        # PIL must be importable before the base tool loads the model.
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )

    def encode( self , image , label ):
        """Tokenize the label and batch the image into model tensors."""
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='''pt''' )

    def forward( self , inputs ):
        """Run CLIPSeg without tracking gradients and return the raw logits."""
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits

    def decode( self , outputs ):
        """Binarize the logits (non-positive -> 0, positive -> 1) and return a PIL mask."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        # ``np.uinta`` does not exist; uint8 is the dtype PIL expects.
        return Image.fromarray((array * 255).astype(np.uint8 ) )
| 39 | 1 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def SCREAMING_SNAKE_CASE ( state_dict : dict ) -> None:
    """Drop fairseq bookkeeping keys from ``state_dict`` in place.

    NOTE(review): locals restored — the original bound the key list to a
    throwaway local and popped the wrong arguments.
    """
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """_float_tensor""",
        """decoder.output_projection.weight""",
    ]
    for k in ignore_keys:
        # ``None`` default: keys absent from the checkpoint are skipped.
        state_dict.pop(k , None )
def SCREAMING_SNAKE_CASE ( emb ) -> nn.Linear:
    """Return a bias-free Linear layer that shares ``emb``'s weight data.

    Used to build a tied LM head from an embedding matrix.

    NOTE(review): restored — the original passed the embedding object itself
    as the Linear dimensions and bias flag.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def SCREAMING_SNAKE_CASE ( checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_aa=False ):
    """Load a fairseq mBART checkpoint and return an equivalent HF model.

    Args:
        checkpoint_path: path to the fairseq ``model.pt`` file.
        hf_config_path: HF config to instantiate (vocab size is overridden).
        finetuned: whether the checkpoint is fine-tuned (ties the LM head).
        mbart_aa: whether the checkpoint is an mBART-50 variant.

    NOTE(review): parameter names restored from the ``__main__`` call site
    (the original repeated one name, a SyntaxError). ``remove_ignore_keys_``
    and ``make_linear_from_emb`` are kept as written upstream even though the
    obfuscated module renamed those helpers.
    """
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_aa and finetuned:
        # mBART-50 fine-tuned checkpoints use ReLU activations.
        mbart_config.activation_function = """relu"""
    # Shared embedding is tied to the decoder's — presumably restores the
    # ``shared.weight`` entry; confirm against the upstream conversion script.
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
_a : Optional[Any] = parser.parse_args()
_a : List[str] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 44 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ ( nums : list[int | float] , left : int , right : int ) -> int | float:
    """Return the maximum of ``nums[left..right]`` (inclusive) by divide and conquer.

    The original signature declared all three parameters as ``_lowerCAmelCase``
    (a SyntaxError); the body already referred to ``left``/``right``, so the
    intended names are restored here.

    Args:
        nums: the sequence to search; must be non-empty.
        left: left bound (negative indices allowed, Python-style).
        right: right bound (inclusive; negative indices allowed).

    Returns:
        The largest element in the inclusive range.

    Raises:
        ValueError: if ``nums`` is empty.
        IndexError: if either bound is outside ``[-len(nums), len(nums))``.
    """
    if len(nums ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = snake_case_(nums , left , mid )  # find max in range[left, mid]
    right_max = snake_case_(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 23 | 0 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
# NOTE(review): every constant below is bound to the SAME name `lowercase`,
# so each assignment clobbers the previous one, and the functions further
# down reference regex names (`_re_backend`, `_re_test_backend`, `_re_try`,
# `_re_else`, ...) that are never defined in this module. Presumably a
# mechanical rename collapsed the original distinct constant names -- verify
# against the upstream `utils/check_inits.py` before running.
lowercase ='src/transformers'
# Matches is_xxx_available()
lowercase =re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
lowercase =re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase =re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
lowercase =re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
lowercase =re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase =re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase =re.compile('^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase =re.compile('^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
lowercase =re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
lowercase =re.compile(r'^\s*try:')
# Catches a line with else:
lowercase =re.compile(r'^\s*else:')
def lowerCamelCase__ ( __lowerCamelCase : Optional[Any] ):
    '''Return the normalized backend name declared on a
    ``if not is_xxx_available()`` guard line (multi-backend guards are joined
    as ``"x_and_y"``), or ``None`` when the line is not a backend guard.

    NOTE(review): ``_re_test_backend`` and ``_re_backend`` are not defined
    under those names in this module (all regex constants above are bound to
    ``lowercase``) -- verify the constant names before running.
    '''
    if _re_test_backend.search(__lowerCamelCase ) is None:
        return None
    # findall yields (backend, '') tuples because the pattern ends in `()`.
    _UpperCAmelCase : List[Any] =[b[0] for b in _re_backend.findall(__lowerCamelCase )]
    backends.sort()
    return "_and_".join(__lowerCamelCase )
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] ):
    '''Parse one lazy ``__init__.py`` and return two dicts mapping a backend
    name (or ``"none"``) to the list of objects registered for it: the first
    built from the ``_import_structure`` half of the file, the second from the
    ``TYPE_CHECKING`` half. Returns ``None`` for a traditional (non-lazy)
    init that has no ``_import_structure``.

    NOTE(review): the regex constants this body relies on
    (``_re_one_line_import_struct``, ``_re_import_struct_key_value``,
    ``_re_try``, ``_re_else``, ``_re_import`` ...) are not defined under those
    names in this module, and locals are assigned to ``_UpperCAmelCase`` while
    later lines read other names (``lines``, ``line_index``, ``objects``,
    ``line``, ``backend`` ...). Presumably a mechanical rename broke the
    original identifiers -- verify against upstream before relying on it.
    '''
    with open(__lowerCamelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
        _UpperCAmelCase : Optional[Any] =f.readlines()
    _UpperCAmelCase : List[Any] =0
    # Skip ahead to the line that opens the lazy `_import_structure` dict.
    while line_index < len(__lowerCamelCase ) and not lines[line_index].startswith('_import_structure = {' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(__lowerCamelCase ):
        return None
    # First grab the objects without a specific backend in _import_structure
    _UpperCAmelCase : Tuple =[]
    while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
        _UpperCAmelCase : Any =lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(__lowerCamelCase ):
            _UpperCAmelCase : Union[str, Any] =_re_one_line_import_struct.search(__lowerCamelCase ).groups()[0]
            _UpperCAmelCase : Optional[Any] =re.findall('\[([^\]]+)\]' , __lowerCamelCase )
            for imp in imports:
                # Strip the surrounding quotes of each object name.
                objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
            line_index += 1
            continue
        _UpperCAmelCase : Any =_re_import_struct_key_value.search(__lowerCamelCase )
        if single_line_import_search is not None:
            _UpperCAmelCase : int =[obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(__lowerCamelCase ) > 0]
            objects.extend(__lowerCamelCase )
        elif line.startswith(' ' * 8 + '"' ):
            # Quoted object declared alone on an 8-space-indented line.
            objects.append(line[9:-3] )
        line_index += 1
    _UpperCAmelCase : int ={'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        _UpperCAmelCase : Tuple =find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            _UpperCAmelCase : str =None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            _UpperCAmelCase : Any =[]
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
                _UpperCAmelCase : int =lines[line_index]
                if _re_import_struct_add_one.search(__lowerCamelCase ) is not None:
                    objects.append(_re_import_struct_add_one.search(__lowerCamelCase ).groups()[0] )
                elif _re_import_struct_add_many.search(__lowerCamelCase ) is not None:
                    _UpperCAmelCase : Optional[int] =_re_import_struct_add_many.search(__lowerCamelCase ).groups()[0].split(', ' )
                    _UpperCAmelCase : Any =[obj[1:-1] for obj in imports if len(__lowerCamelCase ) > 0]
                    objects.extend(__lowerCamelCase )
                elif _re_between_brackets.search(__lowerCamelCase ) is not None:
                    _UpperCAmelCase : Optional[Any] =_re_between_brackets.search(__lowerCamelCase ).groups()[0].split(', ' )
                    _UpperCAmelCase : str =[obj[1:-1] for obj in imports if len(__lowerCamelCase ) > 0]
                    objects.extend(__lowerCamelCase )
                elif _re_quote_object.search(__lowerCamelCase ) is not None:
                    objects.append(_re_quote_object.search(__lowerCamelCase ).groups()[0] )
                elif line.startswith(' ' * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(' ' * 1_2 + '"' ):
                    objects.append(line[1_3:-3] )
                line_index += 1
            _UpperCAmelCase : str =objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    _UpperCAmelCase : Dict =[]
    while (
        line_index < len(__lowerCamelCase )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('else' )
    ):
        _UpperCAmelCase : Optional[Any] =lines[line_index]
        _UpperCAmelCase : int =_re_import.search(__lowerCamelCase )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', ' ) )
        elif line.startswith(' ' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    _UpperCAmelCase : Union[str, Any] ={'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(__lowerCamelCase ):
        # If the line is an if is_backend_available, we grab all objects associated.
        _UpperCAmelCase : List[str] =find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            _UpperCAmelCase : str =None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            _UpperCAmelCase : Union[str, Any] =[]
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
                _UpperCAmelCase : Union[str, Any] =lines[line_index]
                _UpperCAmelCase : Dict =_re_import.search(__lowerCamelCase )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 1_2 ):
                    objects.append(line[1_2:-2] )
                line_index += 1
            _UpperCAmelCase : int =objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def lowerCamelCase__ ( import_dict_objects , type_hint_objects ):
    '''Compare the two halves of a lazy init and return a list of error strings.

    The original signature declared both parameters as ``__lowerCamelCase``
    (a SyntaxError); the body already read ``import_dict_objects`` and
    ``type_hint_objects``, so those names are restored here.

    Args:
        import_dict_objects: backend -> objects mapping from ``_import_structure``.
        type_hint_objects: backend -> objects mapping from the ``TYPE_CHECKING`` block.

    Returns:
        A (possibly empty) list of human-readable error messages.
    '''

    def find_duplicates(seq ):
        # Object names that appear more than once within one backend bucket.
        return [k for k, v in collections.Counter(seq ).items() if v > 1]

    # The two halves must declare exactly the same backends, in the same order.
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
        # Compare as sets so ordering differences are not reported as errors.
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = 'base imports' if key == 'none' else f"{key} backend"
            errors.append(f"Differences for {name}:" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure." )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT." )
    return errors
def lowerCamelCase__ ( ):
    '''Walk the transformers source tree, validate every ``__init__.py`` with
    ``parse_init``/``analyze_results``, and raise ``ValueError`` listing every
    file whose two halves disagree.

    NOTE(review): the walk root ``__lowerCamelCase`` is undefined in this
    scope (presumably it should be the ``src/transformers`` path constant),
    and ``parse_init``/``analyze_results`` are not defined under those names
    in this module (all helpers are named ``lowerCamelCase__``). Verify the
    naming before running.
    '''
    _UpperCAmelCase : Any =[]
    for root, _, files in os.walk(__lowerCamelCase ):
        if "__init__.py" in files:
            _UpperCAmelCase : Optional[int] =os.path.join(__lowerCamelCase , '__init__.py' )
            _UpperCAmelCase : List[Any] =parse_init(__lowerCamelCase )
            # parse_init returns None for traditional (non-lazy) inits.
            if objects is not None:
                _UpperCAmelCase : List[str] =analyze_results(*__lowerCamelCase )
                if len(__lowerCamelCase ) > 0:
                    _UpperCAmelCase : Union[str, Any] =f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append('\n'.join(__lowerCamelCase ) )
    if len(__lowerCamelCase ) > 0:
        raise ValueError('\n\n'.join(__lowerCamelCase ) )
def lowerCamelCase__ ( ):
    '''Return the dotted names of all public submodules of transformers:
    every non-private package directory plus every top-level ``*.py`` module.

    NOTE(review): the walk root ``__lowerCamelCase`` is undefined in this
    scope (presumably the ``src/transformers`` path constant), and most
    locals are bound to ``_UpperCAmelCase`` while later lines read
    ``short_path``/``submodule``/``submodules`` -- presumably a mechanical
    rename broke the identifiers. Verify before running.
    '''
    _UpperCAmelCase : int =[]
    for path, directories, files in os.walk(__lowerCamelCase ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_' ):
                directories.remove(__lowerCamelCase )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(__lowerCamelCase ) / folder).glob('*.py' ) ) ) == 0:
                continue
            _UpperCAmelCase : Optional[Any] =str((Path(__lowerCamelCase ) / folder).relative_to(__lowerCamelCase ) )
            _UpperCAmelCase : List[str] =short_path.replace(os.path.sep , '.' )
            submodules.append(__lowerCamelCase )
        for fname in files:
            if fname == "__init__.py":
                continue
            _UpperCAmelCase : Optional[int] =str((Path(__lowerCamelCase ) / fname).relative_to(__lowerCamelCase ) )
            _UpperCAmelCase : Dict =short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
            # Only keep top-level modules (no dots after stripping the suffix).
            if len(submodule.split('.' ) ) == 1:
                submodules.append(__lowerCamelCase )
    return submodules
# Submodules deliberately excluded from the main init registration check.
# NOTE(review): this list is bound to `lowercase` but the function below reads
# `IGNORE_SUBMODULES` -- presumably the original constant name; verify.
lowercase =[
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
]


def lowerCamelCase__ ( ):
    '''Import the transformers package from source and raise ``ValueError`` if
    any submodule is missing from the main init's ``_import_structure``.

    NOTE(review): ``__lowerCamelCase`` (the package path),
    ``PATH_TO_TRANSFORMERS``, ``IGNORE_SUBMODULES`` and
    ``get_transformers_submodules`` are all undefined under these names in
    this module -- presumably a mechanical rename; verify before running.
    '''
    # This is to make sure the transformers module imported is the one in the repo.
    _UpperCAmelCase : Tuple =importlib.util.spec_from_file_location(
        'transformers' , os.path.join(__lowerCamelCase , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    _UpperCAmelCase : int =spec.loader.load_module()
    _UpperCAmelCase : Dict =[
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(__lowerCamelCase ) > 0:
        _UpperCAmelCase : Tuple ='\n'.join(f"- {module}" for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            f"{list_of_modules}\n"
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )


if __name__ == "__main__":
    # NOTE(review): both helpers are defined as `lowerCamelCase__` above, so
    # these names are undefined as written -- verify.
    check_all_inits()
    check_submodules()
| 352 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst ):
    '''Return a uniformly random element of *lst*.

    Originally this helper was also named ``lowerCamelCase__`` and was
    immediately clobbered by the quickselect definition below, leaving the
    ``random_pivot`` call there unresolved; it is renamed to the name the
    caller actually uses.
    '''
    return choice(lst )


def lowerCamelCase__ ( lst : list[int] , k : int ):
    '''Quickselect: return the k-th smallest element (1-indexed) of *lst*.

    The original signature declared both parameters as ``__lowerCamelCase``
    (a SyntaxError). Elements equal to the pivot (other than the pivot
    itself) are dropped by the partition, so the input is assumed to contain
    distinct values.

    Args:
        lst: list of distinct comparable values.
        k: 1-based rank to select.

    Returns:
        The k-th smallest element.
    '''
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return lowerCamelCase__(big , k - len(small ) - 1 )
    # pivot is in elements smaller than k
    else:
        return lowerCamelCase__(small , k )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 242 | 0 |
def lowerCAmelCase__ ( lowerCamelCase_ : int = 1000):
    '''Project Euler 57: count the first *lowerCamelCase_* expansions of the
    continued fraction for sqrt(2) whose numerator has more digits than the
    denominator.

    The original body used an annotated tuple assignment
    (``a , b : T = 1, 1``), which is a SyntaxError; it is rewritten with a
    plain tuple unpack.

    Args:
        lowerCamelCase_: number of expansions to examine (default 1000).

    Returns:
        How many expansions have a longer numerator than denominator.
    '''
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1 , lowerCamelCase_ + 1):
        # Next expansion: n' = n + 2d, d' = n + d.
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    # The solver above is named `lowerCAmelCase__`; the old guard printed an
    # undefined `solution()`.
    print(f"""{lowerCAmelCase__() = }""")
| 129 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the CpmAnt model. The original code bound both the
# dict and the torch-only object list to the same name `__snake_case` (the
# list clobbered the dict), referenced an undefined `_import_structure` in the
# `_LazyModule` call, and never installed the lazy module into `sys.modules`.
_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed symbols are only registered when torch is installed.
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 129 | 1 |
'''simple docstring'''
from __future__ import annotations
A__ : str = '''Muhammad Umer Farooq'''
A__ : int = '''MIT'''
A__ : Optional[int] = '''1.0.0'''
A__ : List[Any] = '''Muhammad Umer Farooq'''
A__ : Optional[Any] = '''contact@muhammadumerfarooq.me'''
A__ : Optional[Any] = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class snake_case__ ( HTMLParser ):
    '''HTML parser that collects anchor hrefs resolved against a base domain.

    Fixes relative to the previous version: the base class name was undefined
    (``HTMLParser`` is imported above), the tag callback was named ``A_`` so
    ``HTMLParser.feed`` never invoked it (it must be ``handle_starttag``) and
    had duplicate ``__a`` parameters (a SyntaxError), ``self.urls`` /
    ``self.domain`` were assigned to throwaway locals, and the resolved URL
    was discarded in favour of the raw href.
    '''

    def __init__( self , domain ) -> None:
        super().__init__()
        # Resolved links, in discovery order.
        self.urls = []
        self.domain = domain

    def handle_starttag( self , tag , attrs ) -> None:
        '''Collect the href of every anchor tag, resolved against the domain.'''
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def a_ ( _UpperCAmelCase : str ) -> str:
    '''Return the registrable domain (last two host labels) of *_UpperCAmelCase*.

    The previous body called ``get_sub_domain_name``, which is not defined in
    this module (the netloc helper below is also named ``a_``), so the netloc
    extraction is inlined here.
    '''
    return ".".join(parse.urlparse(_UpperCAmelCase ).netloc.split('.' )[-2:] )
def a_ ( _UpperCAmelCase : str ) -> str:
    '''Return the network-location (host[:port]) component of *_UpperCAmelCase*.'''
    parsed = parse.urlparse(_UpperCAmelCase )
    return parsed.netloc
def a_ ( _UpperCAmelCase : str = "https://github.com" ) -> list[str]:
    '''Crawl *_UpperCAmelCase*, follow every anchor found on the page and
    return a sorted list of unique e-mail addresses found on the linked pages.

    NOTE(review): this body calls ``get_domain_name`` and ``Parser``; in this
    module those are defined as ``a_`` and ``snake_case__`` (every helper
    shares a scrambled name), so these lookups fail at runtime; ``domain``,
    ``parser``, ``r``, ``read``, ``emails`` and ``valid_emails`` are likewise
    read without being bound (locals go to ``__snake_case``). It also depends
    on the third-party ``requests`` package and performs live network I/O.
    Verify/repair the naming before running.
    '''
    __snake_case : List[Any] = get_domain_name(_UpperCAmelCase )
    # Initialize the parser
    __snake_case : Tuple = Parser(_UpperCAmelCase )
    try:
        # Open URL
        __snake_case : Any = requests.get(_UpperCAmelCase )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        __snake_case : Dict = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                __snake_case : List[Any] = requests.get(_UpperCAmelCase )
                # Get the valid email.
                __snake_case : Optional[Any] = re.findall('[a-zA-Z0-9]+@' + domain ,read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(_UpperCAmelCase )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(_UpperCAmelCase )


if __name__ == "__main__":
    # NOTE(review): `emails_from_url` and `emails` are not defined under these
    # names in this module (the crawler above is `a_`) -- verify.
    A__ : Tuple = emails_from_url('''https://github.com''')
    print(F"""{len(emails)} emails found:""")
    print('''\n'''.join(sorted(emails)))
| 352 |
'''simple docstring'''
import math
def a_ ( _UpperCAmelCase : int ) -> list:
    '''Sieve of Eratosthenes (odd-only marking): return all primes below
    *_UpperCAmelCase*.

    The previous body bound every value to a throwaway local ``__snake_case``
    while reading ``is_prime``/``index``/``primes``, all of which were
    undefined; the intended bindings are restored here.

    Args:
        _UpperCAmelCase: exclusive upper bound; expected to be >= 3.

    Returns:
        The list of primes below the bound, in increasing order.
    '''
    is_prime = [True] * _UpperCAmelCase
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Mark odd multiples of each odd i up to sqrt(n); evens are skipped
    # entirely and handled by seeding the result with 2.
    for i in range(3 , int(_UpperCAmelCase**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < _UpperCAmelCase:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , _UpperCAmelCase , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int:
    '''Project Euler style accumulation over consecutive prime pairs: for each
    pair (last_prime, next_prime) with last_prime**2 <= limit, it sums the
    numbers in (last_prime**2, next_prime**2] divisible by exactly one of the
    two primes.

    NOTE(review): the body calls ``prime_sieve`` (defined above as ``a_``) and
    reads ``limit``, ``primes``, ``matches_sum``, ``prime_index``,
    ``last_prime``, ``next_prime``, ``lower_bound``, ``upper_bound`` and
    ``current`` -- none of which are bound as written (locals are assigned to
    ``__snake_case``). Presumably a mechanical rename broke the identifiers;
    verify against the original before running.
    '''
    __snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00
    __snake_case : Tuple = prime_sieve(_UpperCAmelCase )
    __snake_case : List[Any] = 0
    __snake_case : List[Any] = 0
    __snake_case : Optional[int] = primes[prime_index]
    while (last_prime**2) <= limit:
        __snake_case : Optional[int] = primes[prime_index + 1]
        __snake_case : Union[str, Any] = last_prime**2
        __snake_case : Dict = next_prime**2
        # Get numbers divisible by lps(current)
        __snake_case : Optional[Any] = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        __snake_case : Optional[Any] = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        __snake_case : List[str] = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        __snake_case : Dict = next_prime
        prime_index += 1
    return matches_sum


if __name__ == "__main__":
    # NOTE(review): the solver above is named `a_`; `solution` is undefined.
    print(solution())
| 0 | 0 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class __lowercase ( lowerCAmelCase__ ):
    """Output container for one scheduler step: the previous-timestep sample
    plus the (optional) predicted fully-denoised sample.

    NOTE(review): both fields are bound to the same name ``_UpperCAmelCase``
    (the second shadows the first), and the ``42`` default presumably replaced
    a tensor type annotation during a mechanical rename -- verify against the
    original ``UnCLIPSchedulerOutput`` dataclass.
    """
    _UpperCAmelCase : Optional[int] = 42
    _UpperCAmelCase : Optional[int] = None
def A_ ( _UpperCAmelCase , _UpperCAmelCase=0.9_9_9 , _UpperCAmelCase="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCAmelCase ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCAmelCase ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}" )
SCREAMING_SNAKE_CASE_: Any = []
for i in range(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] = i / num_diffusion_timesteps
SCREAMING_SNAKE_CASE_: str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCAmelCase ) / alpha_bar_fn(_UpperCAmelCase ) , _UpperCAmelCase ) )
return torch.tensor(_UpperCAmelCase , dtype=torch.floataa )
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    """DDPM-style ancestral sampler used by unCLIP/Karlo.

    NOTE(review): every method below repeats the same parameter name
    (``lowerCAmelCase__``), which is a SyntaxError in Python, and locals are
    bound to ``SCREAMING_SNAKE_CASE_`` while later lines read attributes
    such as ``self.betas``/``self.timesteps`` that are never assigned here.
    Presumably a mechanical rename collapsed the original identifiers --
    compare with the original diffusers ``UnCLIPScheduler`` before use.
    """

    @register_to_config
    def __init__( self : str , lowerCAmelCase__ : Union[str, Any] = 1000 , lowerCAmelCase__ : Union[str, Any] = "fixed_small_log" , lowerCAmelCase__ : Any = True , lowerCAmelCase__ : Any = 1.0 , lowerCAmelCase__ : Union[str, Any] = "epsilon" , lowerCAmelCase__ : Optional[Any] = "squaredcos_cap_v2" , ):
        """Build the cosine beta/alpha tables; only `squaredcos_cap_v2` is supported."""
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'")
        SCREAMING_SNAKE_CASE_: List[Any] = betas_for_alpha_bar(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Tuple = 1.0 - self.betas
        SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.cumprod(self.alphas , dim=0)
        SCREAMING_SNAKE_CASE_: Tuple = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        SCREAMING_SNAKE_CASE_: Any = 1.0
        # setable values
        SCREAMING_SNAKE_CASE_: str = None
        SCREAMING_SNAKE_CASE_: List[str] = torch.from_numpy(np.arange(0 , lowerCAmelCase__)[::-1].copy())
        SCREAMING_SNAKE_CASE_: Optional[int] = variance_type

    def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any = None):
        """Presumably `scale_model_input`: unCLIP needs no input scaling, so the
        sample is returned unchanged. NOTE(review): `sample` is never bound."""
        return sample

    def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple = None):
        """Presumably `set_timesteps`: build a strictly linearly spaced timestep
        schedule for `num_inference_steps` (differs from the train-step-ratio
        spacing used by other schedulers)."""
        SCREAMING_SNAKE_CASE_: Any = num_inference_steps
        SCREAMING_SNAKE_CASE_: str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        SCREAMING_SNAKE_CASE_: int = (np.arange(0 , lowerCAmelCase__) * step_ratio).round()[::-1].copy().astype(np.intaa)
        SCREAMING_SNAKE_CASE_: int = torch.from_numpy(lowerCAmelCase__).to(lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : str=None):
        """Presumably `_get_variance`: compute the posterior variance for
        timestep t, optionally interpolated with a learned range."""
        if prev_timestep is None:
            SCREAMING_SNAKE_CASE_: Union[str, Any] = t - 1
        SCREAMING_SNAKE_CASE_: List[str] = self.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE_: Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        SCREAMING_SNAKE_CASE_: Optional[int] = 1 - alpha_prod_t
        SCREAMING_SNAKE_CASE_: List[Any] = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            SCREAMING_SNAKE_CASE_: str = self.betas[t]
        else:
            SCREAMING_SNAKE_CASE_: Optional[int] = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        SCREAMING_SNAKE_CASE_: Union[str, Any] = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            SCREAMING_SNAKE_CASE_: List[Any] = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            SCREAMING_SNAKE_CASE_: Optional[int] = torch.log(torch.clamp(lowerCAmelCase__ , min=1E-20))
            SCREAMING_SNAKE_CASE_: Optional[int] = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            SCREAMING_SNAKE_CASE_: Optional[int] = variance.log()
            SCREAMING_SNAKE_CASE_: Union[str, Any] = beta.log()
            SCREAMING_SNAKE_CASE_: Tuple = (predicted_variance + 1) / 2
            SCREAMING_SNAKE_CASE_: Optional[Any] = frac * max_log + (1 - frac) * min_log
        return variance

    def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] = None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Tuple = True , ):
        """Presumably `step`: one reverse-diffusion update x_t -> x_{t-1}
        following DDPM formulas (7) and (15)."""
        SCREAMING_SNAKE_CASE_: int = timestep
        # When the model learns the variance range, the output carries twice the
        # channels: first half is the noise prediction, second half the variance.
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = torch.split(lowerCAmelCase__ , sample.shape[1] , dim=1)
        else:
            SCREAMING_SNAKE_CASE_: List[Any] = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            SCREAMING_SNAKE_CASE_: Optional[Any] = t - 1
        SCREAMING_SNAKE_CASE_: Optional[Any] = self.alphas_cumprod[t]
        SCREAMING_SNAKE_CASE_: Tuple = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        SCREAMING_SNAKE_CASE_: Any = 1 - alpha_prod_t
        SCREAMING_SNAKE_CASE_: int = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            SCREAMING_SNAKE_CASE_: List[str] = self.betas[t]
            SCREAMING_SNAKE_CASE_: str = self.alphas[t]
        else:
            SCREAMING_SNAKE_CASE_: Optional[Any] = 1 - alpha_prod_t / alpha_prod_t_prev
            SCREAMING_SNAKE_CASE_: Optional[Any] = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            SCREAMING_SNAKE_CASE_: int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            SCREAMING_SNAKE_CASE_: Optional[Any] = model_output
        else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler.")
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            SCREAMING_SNAKE_CASE_: Any = torch.clamp(
                lowerCAmelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE_: int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        SCREAMING_SNAKE_CASE_: Union[str, Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        SCREAMING_SNAKE_CASE_: Optional[int] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        SCREAMING_SNAKE_CASE_: str = 0
        if t > 0:
            SCREAMING_SNAKE_CASE_: int = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=lowerCAmelCase__ , device=model_output.device)
            SCREAMING_SNAKE_CASE_: List[Any] = self._get_variance(
                lowerCAmelCase__ , predicted_variance=lowerCAmelCase__ , prev_timestep=lowerCAmelCase__ , )
            if self.variance_type == "fixed_small_log":
                SCREAMING_SNAKE_CASE_: Union[str, Any] = variance
            elif self.variance_type == "learned_range":
                SCREAMING_SNAKE_CASE_: int = (0.5 * variance).exp()
            else:
                raise ValueError(
                    F"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler.")
            SCREAMING_SNAKE_CASE_: str = variance * variance_noise
        SCREAMING_SNAKE_CASE_: Union[str, Any] = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , ):
        """Presumably `add_noise`: diffuse clean samples forward to the given
        timesteps, x_t = sqrt(a_bar_t) x_0 + sqrt(1 - a_bar_t) eps."""
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        SCREAMING_SNAKE_CASE_: List[str] = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype)
        SCREAMING_SNAKE_CASE_: Optional[Any] = timesteps.to(original_samples.device)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = alphas_cumprod[timesteps] ** 0.5
        SCREAMING_SNAKE_CASE_: Union[str, Any] = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            SCREAMING_SNAKE_CASE_: str = sqrt_alpha_prod.unsqueeze(-1)
        SCREAMING_SNAKE_CASE_: List[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5
        SCREAMING_SNAKE_CASE_: Optional[int] = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            SCREAMING_SNAKE_CASE_: int = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        SCREAMING_SNAKE_CASE_: int = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 13 | """simple docstring"""
def UpperCAmelCase ( arr ) -> list:
    """Heap's algorithm (iterative): return all permutations of *arr* as tuples.

    Fixes relative to the previous version: the inner ``generate`` declared
    both parameters as ``UpperCAmelCase`` (a SyntaxError) and its body read
    unbound names (``n``, ``c``, ``i``); the __main__ guard also called an
    undefined ``heaps``. The list is permuted in place while tuples of each
    state are collected.

    Args:
        arr: list of elements to permute (mutated in place).

    Returns:
        List of all len(arr)! permutations, each as a tuple.
    """
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []

    def generate(n , arr ):
        # c[i] encodes the loop counter of Heap's recursive formulation.
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                # Swap position depends on parity of i (Heap's rule).
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr ) , arr )
    return res


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(UpperCAmelCase(arr))
| 69 | 0 |
from __future__ import annotations
from math import gcd
def SCREAMING_SNAKE_CASE ( num , seed = 2 , step = 1 , attempts = 3 , ):
    """Pollard's rho integer factorization (Floyd cycle detection).

    Fixes relative to the previous version: all four parameters were declared
    as ``snake_case_`` (a SyntaxError) and the body read unbound names
    (``tortoise``, ``hare``, ``divisor``); the intended bindings are restored.

    Args:
        num: value to factor; must be >= 2.
        seed: initial position of the tortoise and hare.
        step: additive constant of the pseudorandom map.
        attempts: number of (seed, step) retries before giving up.

    Returns:
        A nontrivial divisor of ``num``, or ``None`` if none was found
        within the given attempts (``num`` may be prime).

    Raises:
        ValueError: if ``num`` < 2.
    """
    if num < 2:
        raise ValueError("The input value cannot be less than 2" )
    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard suggested ``f(x) = (x**2 - 1) % num``; we use the shifted form
    # ``f(x) = (x**2 + C) % num`` so ``C`` (= step) can vary between retries.
    def rand_fn(value , step , modulus ) -> int:
        return (pow(value , 2 ) + step) % modulus

    for _ in range(attempts ):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise , step , num )
            hare = rand_fn(hare , step , num )
            hare = rand_fn(hare , step , num )
            # Once both are inside a cycle of length p | num, their position
            # difference shares a common divisor with ``num``.
            divisor = gcd(hare - tortoise , num )
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            if divisor == num:
                # The divisor is ``num`` itself and is useless; retry.
                break
            # The divisor is a nontrivial factor of ``num``!
            return divisor
        # This attempt failed: reseed deterministically from the hare's
        # position (as in Brent's variant) and bump the step constant.
        seed = hare
        step += 1
    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()
    # The function above is `SCREAMING_SNAKE_CASE`; the old guard called an
    # undefined `pollard_rho`.
    divisor = SCREAMING_SNAKE_CASE(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
| 362 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( ProcessorMixin ):
    """
    Processor that wraps a ViLT image processor and a BERT tokenizer into a single
    object: calling it tokenizes the text and computes ``pixel_values``/``pixel_mask``
    for the images, returning one combined :class:`~transformers.BatchEncoding`.
    """

    # Names ProcessorMixin uses to save/load the two sub-components.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, with a deprecation warning.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Tokenize `text` and preprocess `images`; return one merged encoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, order-preserving and de-duplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 286 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
# True when the running torch predates 1.11, whose ONNX exporter still accepts
# the later-removed `use_external_data_format`/`enable_onnx_checker` arguments.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to an ONNX file at `output_path`, creating parent dirs."""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Export the VAE decoder of a Stable Diffusion checkpoint to ONNX.

    fp16 export is only supported on CUDA devices.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()

    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 46 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A (unittest.TestCase ):
    """Tests for CLIPProcessor: save/load round-trips and that the processor
    agrees with its underlying tokenizer and image processor components."""

    def setUp(self):
        # Write a tiny tokenizer vocab/merges and an image-processor config
        # into a temp dir that the processors are loaded from.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 274 | 0 |
import argparse
import datetime
def A__ ( lowerCamelCase ) -> str:
    """Return which day of the week a date falls on, via Zeller's congruence.

    Args:
        lowerCamelCase: date string in ``mm-dd-yyyy`` or ``mm/dd/yyyy`` form.

    Returns:
        A sentence such as ``"Your date 01/01/2000, is a Saturday!"``.

    Raises:
        ValueError: on malformed month/day/year or separators.
        AssertionError: if the computed weekday disagrees with ``datetime``.
    """
    date_input = lowerCamelCase
    # Zeller result f -> weekday name.
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # datetime.weekday() (Mon=0 .. Sun=6) -> Zeller's numbering used below.
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math: January/February count as months 13/14 of the previous year.
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math against the standard library's own calendar.
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    # A__ implements Zeller's congruence above.
    A__(args.date_input)
| 370 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint-name -> config-URL map for LUKE.
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class _UpperCamelCase ( PretrainedConfig ):
    """Configuration for LUKE models: vocabulary/entity-vocabulary sizes,
    transformer dimensions and the entity-aware-attention switch."""

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs LukeConfig; special-token ids are forwarded to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 223 | 0 |
"""simple docstring"""
def __lowerCAmelCase (first_term, common_diff, num_of_terms):
    """Return the sum of an arithmetic series.

    Uses S = n/2 * (2*a + (n-1)*d) with first term ``first_term``, common
    difference ``common_diff`` and ``num_of_terms`` terms; result is a float.
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def __lowerCAmelCase ():
    """Demo: print the sum of the arithmetic series 1 + 2 + ... + 10 (55.0)."""
    # NOTE(review): the original called sum_of_series(1, 1, 10), but that
    # helper's obfuscated name collides with this function's, so the series
    # sum is computed inline with the same formula.
    first_term, common_diff, num_of_terms = 1, 1, 10
    print((num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( __A ) -> int:
    """Return the number of positive divisors of ``__A``.

    Trial-divides out each prime factor and multiplies (multiplicity + 1)
    per prime, since d(p1^a1 * ... * pk^ak) = prod(ai + 1).
    """
    n = __A
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # Leftover factor > sqrt(original n) is prime with multiplicity 1.
        n_divisors *= 2
    return n_divisors
def SCREAMING_SNAKE_CASE__ ( target: int = 500 ) -> int:
    """Project Euler 12: first triangle number with more than ``target`` divisors.

    The divisor-count threshold defaults to the problem's 500; it is exposed
    as a parameter (backward compatible) so the search is testable on small
    thresholds.
    """

    def count_divisors(n):
        # Divisor count from the prime factorisation of n.
        # NOTE(review): nested copy of the sibling helper above, whose
        # obfuscated name this function shadows at module level.
        divisors = 1
        p = 2
        while p * p <= n:
            multiplicity = 0
            while n % p == 0:
                n //= p
                multiplicity += 1
            divisors *= multiplicity + 1
            p += 1
        if n > 1:
            divisors *= 2
        return divisors

    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i  # t_num is the i-th triangle number
        if count_divisors(t_num) > target:
            break
    return t_num


if __name__ == "__main__":
    print(SCREAMING_SNAKE_CASE__())
| 42 | 0 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 354 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: submodule name -> list of public names it provides.
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first
    # attribute access, per the structure built above.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 152 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint-name -> config-URL map for RoFormer.
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class A_ ( PretrainedConfig ):
    """Configuration for RoFormer models (rotary position embedding variant)."""

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        """Constructs RoFormerConfig; the pad-token id is forwarded to the base class."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # Embedding size defaults to the hidden size when not given explicitly.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class A_ ( OnnxConfig ):
    """ONNX export configuration for RoFormer.

    NOTE(review): this class reuses the obfuscated name ``A_`` of the config
    class above and therefore shadows it at module level, exactly as in the
    original source; renaming both is out of scope for this block.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 151 | """simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
# Module-level logger used by the retriever classes below.
logger = logging.getLogger(__name__)
class _A :
    """Ray-actor wrapper that holds one RagRetriever replica per worker.

    The retriever is built lazily via create_rag_retriever(); retrieve()
    forwards to the wrapped retriever's internal _main_retrieve.
    """

    def __init__(self):
        # Becomes True once create_rag_retriever() has built the retriever.
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Build the wrapped RagRetriever once; later calls are no-ops."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        """Load the retrieval index inside this worker process."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Return (doc_ids, retrieved_doc_embeds) for the given query states."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class _A ( RagRetriever ):
    """RagRetriever variant that distributes retrieval over Ray actor workers.

    With one or more workers, retrieval calls are sent to a randomly chosen
    actor; with none, retrieval runs in-process like the base class.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            # Give every worker its own retriever replica up front.
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        """Initialize the retrieval index on every worker (or locally)."""
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Return (retrieved_doc_embeds, doc_ids, doc_dicts) for the queries."""
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(_A, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Build a distributed retriever from a pretrained checkpoint and Ray actors."""
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 197 | 0 |
"""simple docstring"""
import numpy
class UpperCamelCase :
def __init__(self : Optional[int] , _A : numpy.ndarray , _A : numpy.ndarray) -> None:
__snake_case : Tuple = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
__snake_case : List[str] = numpy.random.rand(
self.input_array.shape[1] , 4)
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
__snake_case : Optional[Any] = numpy.random.rand(
4 , 3)
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
__snake_case : Optional[int] = numpy.random.rand(3 , 1)
# Real output values provided.
__snake_case : Optional[Any] = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
__snake_case : List[str] = numpy.zeros(output_array.shape)
def _lowercase (self : Optional[int]) -> numpy.ndarray:
__snake_case : List[str] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights))
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
__snake_case : List[str] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ))
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
__snake_case : str = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ))
return self.layer_between_second_hidden_layer_and_output
def _lowercase (self : Dict) -> None:
__snake_case : int = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , )
__snake_case : int = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer) , )
__snake_case : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _lowercase(self: Optional[int], output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
    """Train the network: run feedforward + backpropagation ``iterations`` times.

    output: ground-truth targets, used only for the optional loss printout.
    iterations: number of full training passes.
    give_loss: when True, print the mean-squared-error loss each iteration.

    NOTE(review): the original signature named all three parameters ``_A``
    (a SyntaxError); the names used here are the ones the body reads and the
    keywords the demo caller in this file passes.
    """
    for iteration in range(1, iterations + 1):
        # Cache the forward pass; back_propagation reads self.predicted_output.
        self.predicted_output = self.feedforward()
        self.back_propagation()
        if give_loss:
            loss = numpy.mean(numpy.square(output - self.feedforward()))
            print(f"Iteration {iteration} Loss: {loss}")
def _lowercase(self: List[Any], input_arr: numpy.ndarray) -> int:
    """Forward-propagate a single input and threshold the output at 0.6.

    Returns 1 when the network's output exceeds 0.6, else 0.

    NOTE(review): the original assigned each activation to a throwaway local
    while subsequent lines read the ``self.*`` attributes; restored here.
    """
    self.array = input_arr
    self.layer_between_input_and_first_hidden_layer = sigmoid(
        numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
    )
    self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
        numpy.dot(
            self.layer_between_input_and_first_hidden_layer,
            self.first_hidden_layer_and_second_hidden_layer_weights,
        )
    )
    self.layer_between_second_hidden_layer_and_output = sigmoid(
        numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer,
            self.second_hidden_layer_and_output_layer_weights,
        )
    )
    return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def __UpperCAmelCase(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid activation: maps every element into (0, 1).

    NOTE(review): the parameter was mangled to ``UpperCAmelCase_`` while the
    body read the undefined name ``value``; the parameter is renamed to match.
    """
    return 1 / (1 + numpy.exp(-value))
def __UpperCAmelCase(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid's output.

    For s = sigmoid(x), the derivative is s * (1 - s); ``value`` must already
    be a sigmoid output.

    NOTE(review): the parameter was mangled to ``UpperCAmelCase_`` while the
    body read the undefined name ``value``; the parameter is renamed to match.
    """
    return (value) * (1 - (value))
def __UpperCAmelCase() -> int:
    """End-to-end demo: train the two-hidden-layer network on 3-bit parity.

    Returns the network's 0/1 prediction for the input (1, 1, 1).

    NOTE(review): ``numpy.floataa`` (a mangled ``numpy.float64``) and the
    undefined ``UpperCAmelCase_`` argument references are repaired here. The
    network class's train/predict methods were also machine-renamed elsewhere
    in this file and must be restored for this demo to run.
    """
    # All eight possible 3-bit inputs.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values (the parity/XOR of the three input bits).
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
    # NOTE(review): the demo function above was machine-renamed to
    # __UpperCAmelCase, so `example` is undefined at this point in the file;
    # call the demo under the name actually bound here.
    __UpperCAmelCase()
| 359 | """simple docstring"""
def __UpperCAmelCase(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` (Euclid's algorithm).

    NOTE(review): both parameters were mangled to the same name
    ``UpperCAmelCase_`` (a SyntaxError) and the loop assigned to throwaway
    locals; restored to the ``a``/``b`` the body iterates on.
    """
    while a != 0:
        a, b = b % a, a
    return b
def __UpperCAmelCase(a: int, m: int) -> int:
    """Return the modular multiplicative inverse of ``a`` modulo ``m``.

    Uses the iterative extended Euclidean algorithm.

    Raises:
        ValueError: when gcd(a, m) != 1, i.e. no inverse exists.

    NOTE(review): duplicate mangled parameters and collapsed tuple names
    (all of u1/u2/u3/v1/v2/v3 became ``ua``/``va``) are reconstructed here.
    """
    from math import gcd  # stdlib gcd; the sibling helper's name was mangled away

    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Invariant: prev_x * a ≡ prev_r (mod m) and x * a ≡ r (mod m).
    prev_x, x = 1, 0
    prev_r, r = a, m
    while r != 0:
        q = prev_r // r
        prev_x, x = x, prev_x - q * x
        prev_r, r = r, prev_r - q * r
    return prev_x % m
| 95 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class _SCREAMING_SNAKE_CASE:
    """Config/input builder and shape checks for tiny TF DPR models.

    Builds a small BertConfig-backed DPRConfig plus random input tensors, then
    asserts output shapes of TFDPRContextEncoder / TFDPRQuestionEncoder /
    TFDPRReader.

    NOTE(review): identifiers in this class were machine-mangled. Every
    ``__init__`` parameter is literally named ``SCREAMING_SNAKE_CASE__``
    (duplicate argument names — a SyntaxError), and locals are bound to the
    throwaway name ``__SCREAMING_SNAKE_CASE`` while later lines read the real
    names (``parent``, ``input_ids``, ``config_and_inputs``, ...). The
    original names must be restored before this class can run.
    """

    def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=13 ,SCREAMING_SNAKE_CASE__=7 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=99 ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=4 ,SCREAMING_SNAKE_CASE__=37 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=5_12 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=4 ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=0 ,) -> Union[str, Any]:
        """Store tester hyper-parameters (batch size, sequence length, model dims, ...).

        NOTE(review): the defaults (13, 7, True, ..., None, 0) line up, in
        order, with the attribute assignments below (parent, batch_size,
        seq_length, ..., scope, projection_dim) — TODO confirm the mapping.
        """
        __SCREAMING_SNAKE_CASE :List[Any] = parent
        __SCREAMING_SNAKE_CASE :int = batch_size
        __SCREAMING_SNAKE_CASE :Optional[int] = seq_length
        __SCREAMING_SNAKE_CASE :int = is_training
        __SCREAMING_SNAKE_CASE :Optional[Any] = use_input_mask
        __SCREAMING_SNAKE_CASE :Dict = use_token_type_ids
        __SCREAMING_SNAKE_CASE :List[str] = use_labels
        __SCREAMING_SNAKE_CASE :List[str] = vocab_size
        __SCREAMING_SNAKE_CASE :Tuple = hidden_size
        __SCREAMING_SNAKE_CASE :Dict = num_hidden_layers
        __SCREAMING_SNAKE_CASE :Tuple = num_attention_heads
        __SCREAMING_SNAKE_CASE :Dict = intermediate_size
        __SCREAMING_SNAKE_CASE :Any = hidden_act
        __SCREAMING_SNAKE_CASE :Tuple = hidden_dropout_prob
        __SCREAMING_SNAKE_CASE :Optional[Any] = attention_probs_dropout_prob
        __SCREAMING_SNAKE_CASE :int = max_position_embeddings
        __SCREAMING_SNAKE_CASE :List[Any] = type_vocab_size
        __SCREAMING_SNAKE_CASE :List[str] = type_sequence_label_size
        __SCREAMING_SNAKE_CASE :str = initializer_range
        __SCREAMING_SNAKE_CASE :str = num_labels
        __SCREAMING_SNAKE_CASE :str = num_choices
        __SCREAMING_SNAKE_CASE :Optional[Any] = scope
        __SCREAMING_SNAKE_CASE :int = projection_dim

    def _UpperCamelCase ( self ) -> Tuple:
        """Build a small DPRConfig plus random ids/mask/label tensors for testing."""
        __SCREAMING_SNAKE_CASE :Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        __SCREAMING_SNAKE_CASE :Dict = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            __SCREAMING_SNAKE_CASE :Dict = random_attention_mask([self.batch_size, self.seq_length] )
        __SCREAMING_SNAKE_CASE :str = None
        if self.use_token_type_ids:
            __SCREAMING_SNAKE_CASE :Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        __SCREAMING_SNAKE_CASE :int = None
        __SCREAMING_SNAKE_CASE :int = None
        __SCREAMING_SNAKE_CASE :int = None
        if self.use_labels:
            __SCREAMING_SNAKE_CASE :List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            __SCREAMING_SNAKE_CASE :Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            __SCREAMING_SNAKE_CASE :Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
        # DPR wraps a BertConfig; projection_dim=0 means "no projection layer".
        __SCREAMING_SNAKE_CASE :str = BertConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,)
        __SCREAMING_SNAKE_CASE :List[Any] = DPRConfig(projection_dim=self.projection_dim ,**config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Dict:
        """Check the context encoder's pooled output shape for three call styles."""
        __SCREAMING_SNAKE_CASE :Optional[Any] = TFDPRContextEncoder(config=SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :List[Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :int = model(SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Tuple = model(SCREAMING_SNAKE_CASE__ )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )

    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
        """Check the question encoder's pooled output shape for three call styles."""
        __SCREAMING_SNAKE_CASE :Tuple = TFDPRQuestionEncoder(config=SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :List[Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :int = model(SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Any = model(SCREAMING_SNAKE_CASE__ )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )

    def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
        """Check the reader's start/end/relevance logit shapes."""
        __SCREAMING_SNAKE_CASE :Any = TFDPRReader(config=SCREAMING_SNAKE_CASE__ )
        __SCREAMING_SNAKE_CASE :Dict = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ )
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape ,(self.batch_size,) )

    def _UpperCamelCase ( self ) -> List[str]:
        """Unpack prepare_config_and_inputs into the common (config, inputs_dict) pair.

        NOTE(review): the multi-target unpacking below binds everything to the
        same throwaway name and then reads undefined ``config_and_inputs``;
        the original local names were destroyed by the renaming tool.
        """
        __SCREAMING_SNAKE_CASE :int = self.prepare_config_and_inputs()
        (
            (
                __SCREAMING_SNAKE_CASE
            ) ,(
                __SCREAMING_SNAKE_CASE
            ) ,(
                __SCREAMING_SNAKE_CASE
            ) ,(
                __SCREAMING_SNAKE_CASE
            ) ,(
                __SCREAMING_SNAKE_CASE
            ) ,(
                __SCREAMING_SNAKE_CASE
            ) ,(
                __SCREAMING_SNAKE_CASE
            ) ,
        ) :List[Any] = config_and_inputs
        __SCREAMING_SNAKE_CASE :Any = {'''input_ids''': input_ids}
        return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE( A , A , unittest.TestCase ):
    """Standard test-suite wiring for the TF DPR models.

    NOTE(review): this class was machine-mangled. Both base classes are
    literally ``A`` (undefined here; presumably TFModelTesterMixin and
    PipelineTesterMixin from the imports above — TODO confirm), all class
    attributes rebind the single name ``SCREAMING_SNAKE_CASE_``, the methods
    all share the name ``_UpperCamelCase`` (later defs shadow earlier ones),
    and setUp references undefined ``TFDPRModelTester`` /
    ``SCREAMING_SNAKE_CASE__``. The original names must be restored.
    """

    # Tuple of the model classes under test (empty when TF is missing).
    SCREAMING_SNAKE_CASE_ : Any = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline mapping: the question encoder serves feature-extraction.
    SCREAMING_SNAKE_CASE_ : List[Any] = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {}
    # Feature flags disabled for DPR (resizing, pruning, head masking, onnx...).
    SCREAMING_SNAKE_CASE_ : Optional[Any] = False
    SCREAMING_SNAKE_CASE_ : Dict = False
    SCREAMING_SNAKE_CASE_ : Any = False
    SCREAMING_SNAKE_CASE_ : List[Any] = False
    SCREAMING_SNAKE_CASE_ : Any = False

    def _UpperCamelCase ( self ) -> Optional[Any]:
        # setUp: build the model tester and a ConfigTester for DPRConfig.
        __SCREAMING_SNAKE_CASE :Dict = TFDPRModelTester(self )
        __SCREAMING_SNAKE_CASE :List[Any] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,hidden_size=37 )

    def _UpperCamelCase ( self ) -> Any:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self ) -> Optional[int]:
        """Exercise the context-encoder shape checks."""
        __SCREAMING_SNAKE_CASE :List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ) -> str:
        """Exercise the question-encoder shape checks."""
        __SCREAMING_SNAKE_CASE :Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*SCREAMING_SNAKE_CASE__ )

    def _UpperCamelCase ( self ) -> int:
        """Exercise the reader shape checks."""
        __SCREAMING_SNAKE_CASE :Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*SCREAMING_SNAKE_CASE__ )

    @slow
    def _UpperCamelCase ( self ) -> List[str]:
        """Smoke-test from_pretrained for the first checkpoint of each archive list."""
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __SCREAMING_SNAKE_CASE :Tuple = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
        # NOTE(review): the context-encoder list is iterated twice here —
        # upstream this second loop likely covered a different archive list.
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __SCREAMING_SNAKE_CASE :str = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __SCREAMING_SNAKE_CASE :Optional[int] = TFDPRQuestionEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __SCREAMING_SNAKE_CASE :Union[str, Any] = TFDPRReader.from_pretrained(SCREAMING_SNAKE_CASE__ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_tf
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
    """Slow integration test for the real DPR question encoder.

    NOTE(review): locals here were mangled to ``__SCREAMING_SNAKE_CASE`` while
    later lines read the real names (``model``, ``output``,
    ``expected_slice``); restore before running.
    """

    @slow
    def _UpperCamelCase ( self ) -> Tuple:
        """Embed one tokenized sentence with facebook/dpr-question_encoder-single-nq-base
        and compare the first 10 output dimensions against a recorded slice (atol 1e-4)."""
        __SCREAMING_SNAKE_CASE :Optional[int] = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        __SCREAMING_SNAKE_CASE :Tuple = tf.constant(
            [[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] )  # [CLS] hello, is my dog cute? [SEP]
        __SCREAMING_SNAKE_CASE :Optional[Any] = model(SCREAMING_SNAKE_CASE__ )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        __SCREAMING_SNAKE_CASE :Optional[Any] = tf.constant(
            [
                [
                    0.0_3_2_3_6_2_5_3,
                    0.1_2_7_5_3_3_3_5,
                    0.1_6_8_1_8_5_0_9,
                    0.0_0_2_7_9_7_8_6,
                    0.3_8_9_6_9_3_3,
                    0.2_4_2_6_4_9_4_5,
                    0.2_1_7_8_9_7_1,
                    -0.0_2_3_3_5_2_2_7,
                    -0.0_8_4_8_1_9_5_9,
                    -0.1_4_3_2_4_1_1_7,
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :10].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
"""simple docstring"""
def __lowerCamelCase(number: int) -> bool:
    """Return True if ``number`` is automorphic (its square ends in the number itself).

    Example: 76 is automorphic because 76**2 == 5776 ends in 76.

    Raises:
        TypeError: when ``number`` is not an integer.

    NOTE(review): the parameter was mangled to ``a_`` while the body read
    ``number``, and the type check was the nonsensical ``isinstance(a_, a_)``;
    both are repaired here.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and its square, one at a time.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
    import doctest

    # Execute any >>> examples embedded in this module's docstrings as a self-test.
    doctest.testmod()
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def A(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Build a Gabor filter kernel (Gaussian envelope modulated by a cosine wave).

    ksize: kernel side length; even values are bumped to the next odd size so
        the kernel has a true center.
    sigma: standard deviation of the Gaussian envelope.
    theta: orientation of the filter in degrees.
    lambd: wavelength of the cosine carrier.
    gamma: spatial aspect ratio (ellipticity of the envelope).
    psi: phase offset of the carrier, in radians.

    NOTE(review): the original signature named all six parameters ``__A``
    (a SyntaxError); the names/order used here are the ones the body reads
    and match the call site later in this file.
    """
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)  # was mangled np.floataa
    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel: Gaussian envelope times cosine carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): every assignment below originally bound a throwaway
    # ``snake_case_`` name while the next line read the real one (img, gray,
    # out, kernel_aa); the real names are restored here. The kernel function
    # in this file is named ``A`` (``gabor_filter_kernel`` was undefined).
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_aa = A(10, 8, theta, 10, 0, 0)
        out += filteraD(gray, CV_8UC3, kernel_aa)
    # normalize the accumulated response into 0..255 grayscale
    out = out / out.max() * 255
    out = out.astype(np.uint8)  # was mangled np.uinta
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
| 371 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the DeiT model family: heavy submodules are listed
# here and only imported when their backend (vision / torch / tf) is available.
# NOTE(review): the original assigned every piece to a throwaway ``snake_case_``
# name and then passed the undefined ``_import_structure`` to _LazyModule;
# the standard HuggingFace lazy-init pattern is restored here.
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they stay lazy.
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.