| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ )
for i in range(n - 1 ):
for j in range(i + 1 , lowerCAmelCase_ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
if len(lowerCAmelCase_ ) <= 1:
return arr, 0
__SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) // 2
__SCREAMING_SNAKE_CASE = arr[0:mid]
__SCREAMING_SNAKE_CASE = arr[mid:]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = _count_cross_inversions(lowerCAmelCase_ , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = 0
while i < len(lowerCAmelCase_ ) and j < len(lowerCAmelCase_ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(lowerCAmelCase_ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(lowerCAmelCase_ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__SCREAMING_SNAKE_CASE = count_inversions_bf(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(lowerCAmelCase_ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , lowerCAmelCase_ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__SCREAMING_SNAKE_CASE = count_inversions_bf(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(lowerCAmelCase_ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowerCAmelCase_ )
# an empty list should also have zero inversions
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = count_inversions_bf(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = count_inversions_recursive(lowerCAmelCase_ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , lowerCAmelCase_ )
if __name__ == "__main__":
main()
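# --- Illustrative usage sketch (added; not part of the original module) ---
# Both counters agree on any input; the recursive version also returns the
# sorted array as a by-product of its merge step.
if __name__ == "__main__":
    sample = [3, 1, 2]  # inversions: (3, 1) and (3, 2)
    sorted_sample, fast_count = count_inversions_recursive(sample)
    assert count_inversions_bf(sample) == fast_count == 2
    assert sorted_sample == [1, 2, 3]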
| 54 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    """Log metrics for one split and save them as {split}_results.json."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
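# --- Illustrative invocation sketch (added; not part of the original script) ---
# The flags mirror the dataclass fields above; the model id and paths are
# placeholders, so this is left as a comment:
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/tiny-mbart \
#       --data_dir ./wmt_en_ro \
#       --output_dir ./seq2seq_out --overwrite_output_dir \
#       --do_train --do_eval --predict_with_generate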
| 24 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : str ) -> list[int]:
_lowerCAmelCase : Tuple = int(_lowerCamelCase )
# Initialize Result
_lowerCAmelCase : Optional[int] = []
# Traverse through all denomination
for denomination in reversed(_lowerCamelCase ):
# Find denominations
while int(_lowerCamelCase ) >= int(_lowerCamelCase ):
total_value -= int(_lowerCamelCase )
answer.append(_lowerCamelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
_a : Tuple = []
_a : Any = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
_a : Any = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
_a : Dict = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
_a : List[Any] = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
_a : Optional[Any] = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(F"""Following is minimal change for {value}: """)
_a : int = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
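# --- Illustrative usage sketch (added; not part of the original module) ---
# With the canonical Indian denominations the greedy choice is optimal here:
assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987") == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2,
]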
| 366 |
"""Transform a snake_case string to camelCase (or PascalCase if indicated)."""


def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
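# --- Illustrative usage sketch (added; not part of the original module) ---
assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"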
| 126 | 0 |
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal positional embeddings for a batch of timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Projects sinusoidal timestep features through a two-layer MLP."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Maps integer timesteps to sinusoidal embeddings."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
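# --- Illustrative usage sketch (added; not part of the original module) ---
# Four timesteps embedded into 32 sinusoidal features each:
if __name__ == "__main__":
    demo = get_sinusoidal_embeddings(jnp.arange(4, dtype=jnp.float32), embedding_dim=32)
    assert demo.shape == (4, 32)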
| 73 |
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """
    Fractional knapsack: maximise the value carried in capacity ``w`` over
    ``n`` items with values ``vl`` and weights ``wt``; items may be split.
    """
    # Sort items by value/weight ratio, best first
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
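# --- Illustrative usage sketch (added; not part of the original module) ---
# Capacity 50 takes the two best-ratio items whole (values 60 and 100,
# weights 10 and 20) plus 20/30 of the last item: 160 + 80 = 240.
assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0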
| 73 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=lowercase_ ):
__A = ["keras_nlp"]
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
requires_backends(self , ["""keras_nlp"""] )
| 354 |
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
| 262 | 0 |
"""simple docstring"""
from math import pow, sqrt
def __a ( *_SCREAMING_SNAKE_CASE ) ->bool:
a__: Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) > 0 and all(value > 0.0 for value in values )
return result
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError('Input Error: Molar mass values must greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->float | ValueError:
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
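# --- Illustrative usage sketch (added; not part of the original module) ---
# Hydrogen (2.016 g/mol) effuses roughly four times faster than oxygen
# (31.99 g/mol); the exact ratio here is an approximation, so only a range
# is asserted:
if __name__ == "__main__":
    ratio = effusion_ratio(2.016, 31.99)
    assert isinstance(ratio, float) and 3.9 < ratio < 4.1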
| 290 |
"""Basic number-theory helpers: gcd and modular inverse (extended Euclid)."""


def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
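# --- Illustrative usage sketch (added; not part of the original module) ---
assert gcd(24, 40) == 8
assert find_mod_inverse(7, 26) == 15  # 7 * 15 = 105 = 4 * 26 + 1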
| 290 | 1 |
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
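# --- Illustrative usage sketch (added; not part of the original module) ---
# Loading a published checkpoint requires network access, so this is left
# as a comment:
#
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tok("ConvBERT is a convolution-augmented BERT.")["input_ids"]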
| 370 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402

SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 127 | 0 |
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(a )
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , **__UpperCAmelCase ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
requires_backends(self , 'vision' )
requires_backends(self , 'torch' )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__UpperCAmelCase )
def snake_case ( self , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = {}
lowerCAmelCase__ :Tuple = {}
lowerCAmelCase__ :Any = {}
# preprocess args
if "points_per_batch" in kwargs:
lowerCAmelCase__ :Dict = kwargs['points_per_batch']
if "points_per_crop" in kwargs:
lowerCAmelCase__ :Union[str, Any] = kwargs['points_per_crop']
if "crops_n_layers" in kwargs:
lowerCAmelCase__ :Any = kwargs['crops_n_layers']
if "crop_overlap_ratio" in kwargs:
lowerCAmelCase__ :Any = kwargs['crop_overlap_ratio']
if "crop_n_points_downscale_factor" in kwargs:
lowerCAmelCase__ :Dict = kwargs['crop_n_points_downscale_factor']
# postprocess args
if "pred_iou_thresh" in kwargs:
lowerCAmelCase__ :Tuple = kwargs['pred_iou_thresh']
if "stability_score_offset" in kwargs:
lowerCAmelCase__ :Optional[int] = kwargs['stability_score_offset']
if "mask_threshold" in kwargs:
lowerCAmelCase__ :List[Any] = kwargs['mask_threshold']
if "stability_score_thresh" in kwargs:
lowerCAmelCase__ :Optional[Any] = kwargs['stability_score_thresh']
if "crops_nms_thresh" in kwargs:
lowerCAmelCase__ :int = kwargs['crops_nms_thresh']
if "output_rle_mask" in kwargs:
lowerCAmelCase__ :Union[str, Any] = kwargs['output_rle_mask']
if "output_bboxes_mask" in kwargs:
lowerCAmelCase__ :Optional[Any] = kwargs['output_bboxes_mask']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
return super().__call__(__UpperCAmelCase , *__UpperCAmelCase , num_workers=__UpperCAmelCase , batch_size=__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=6_4 , __UpperCAmelCase = 0 , __UpperCAmelCase = 5_1_2 / 1_5_0_0 , __UpperCAmelCase = 3_2 , __UpperCAmelCase = 1 , ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = load_image(__UpperCAmelCase )
lowerCAmelCase__ :int = self.image_processor.size['longest_edge']
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :int = self.image_processor.generate_crop_boxes(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = self.image_processor(images=__UpperCAmelCase , return_tensors='pt' )
with self.device_placement():
if self.framework == "pt":
lowerCAmelCase__ :Optional[int] = self.get_inference_context()
with inference_context():
lowerCAmelCase__ :Any = self._ensure_tensor_on_device(__UpperCAmelCase , device=self.device )
lowerCAmelCase__ :Tuple = self.model.get_image_embeddings(model_inputs.pop('pixel_values' ) )
lowerCAmelCase__ :Optional[int] = image_embeddings
lowerCAmelCase__ :List[Any] = grid_points.shape[1]
lowerCAmelCase__ :Union[str, Any] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
'Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '
'To return all points at once, set points_per_batch to None' )
for i in range(0 , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :Optional[Any] = grid_points[:, i : i + points_per_batch, :, :]
lowerCAmelCase__ :List[str] = input_labels[:, i : i + points_per_batch]
lowerCAmelCase__ :List[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0.88 , __UpperCAmelCase=0.95 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , ):
'''simple docstring'''
lowerCAmelCase__ :Any = model_inputs.pop('input_boxes' )
lowerCAmelCase__ :Optional[int] = model_inputs.pop('is_last' )
lowerCAmelCase__ :Dict = model_inputs.pop('original_sizes' ).tolist()
lowerCAmelCase__ :Dict = model_inputs.pop('reshaped_input_sizes' ).tolist()
lowerCAmelCase__ :Optional[int] = self.model(**__UpperCAmelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
lowerCAmelCase__ :int = model_outputs['pred_masks']
lowerCAmelCase__ :Optional[Any] = self.image_processor.post_process_masks(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , binarize=__UpperCAmelCase )
lowerCAmelCase__ :Any = model_outputs['iou_scores']
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.7 , ):
'''simple docstring'''
lowerCAmelCase__ :Dict = []
lowerCAmelCase__ :Optional[Any] = []
lowerCAmelCase__ :int = []
for model_output in model_outputs:
all_scores.append(model_output.pop('iou_scores' ) )
all_masks.extend(model_output.pop('masks' ) )
all_boxes.append(model_output.pop('boxes' ) )
lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase )
lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = self.image_processor.post_process_for_mask_generation(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Tuple = defaultdict(__UpperCAmelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = {}
if output_rle_mask:
lowerCAmelCase__ :str = rle_mask
if output_bboxes_mask:
lowerCAmelCase__ :Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 293 |
"""simple docstring"""
import math
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :Dict = F"Input value of [number={number}] must be an integer"
raise TypeError(_SCREAMING_SNAKE_CASE )
if number < 1:
lowerCAmelCase__ :Dict = F"Input value of [number={number}] must be > 0"
raise ValueError(_SCREAMING_SNAKE_CASE )
elif number == 1:
return 3
elif number == 2:
return 5
else:
lowerCAmelCase__ :Union[str, Any] = int(math.log(number // 3 , 2 ) ) + 2
lowerCAmelCase__ :Optional[Any] = [3, 5]
lowerCAmelCase__ :Optional[Any] = 2
lowerCAmelCase__ :List[str] = 3
for block in range(1 , _SCREAMING_SNAKE_CASE ):
for _ in range(_SCREAMING_SNAKE_CASE ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
__A = 0
try:
__A = proth(number)
except ValueError:
print(F'''ValueError: there is no {number}th Proth number''')
continue
print(F'''The {number}th Proth number: {value}''')
| 293 | 1 |
"""
Project Euler Problem 38: https://projecteuler.net/problem=38

Find the largest 1 to 9 pandigital 9-digit number that can be formed as the
concatenated product of an integer with (1, 2, ..., n) where n > 1.
"""
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Check whether n uses each digit 1-9 exactly once."""
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set("123456789")


def solution() -> int | None:
    """
    A 4-digit base (5000-9999) concatenated with its double equals
    base * 100002; a 3-digit base concatenated with its double and triple
    equals base * 1002003. Search the larger family first, descending.
    """
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None


if __name__ == "__main__":
    print(f"{solution() = }")
| 371 |
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 92 | 0 |
def equation(x: float) -> float:
    """Function whose root we search for on an interval."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Find a root of equation() on [a, b] by repeated interval halving."""
    # Bolzano's theorem: a sign change is required for a root to exist
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
| 90 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : str = ShapEImgaImgPipeline
A_ : str = ['image']
A_ : int = ['image']
A_ : Tuple = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 | 0 |
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
| 87 |
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch


if is_vision_available():
    from transformers import TvltImageProcessor

if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 87 | 1 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class __snake_case ( lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = RoFormerTokenizer
lowerCAmelCase_ = RoFormerTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
def __a ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
def __a ( self : int , **_lowercase : Optional[Any] ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def __a ( self : str , **_lowercase : Optional[int] ):
"""simple docstring"""
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **_lowercase )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """永和服装饰品有限公司,今天天气非常好"""
SCREAMING_SNAKE_CASE__ = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_chinese_input_output_texts()
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_chinese_input_output_texts()
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , output_text.split() )
SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def __a ( self : Optional[int] ):
"""simple docstring"""
pass
def __a ( self : Dict ):
"""simple docstring"""
pass
def __a ( self : str ):
"""simple docstring"""
pass
| 219 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Compute the circular convolution of two discrete signals."""
    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def circular_convolution(self) -> list[float]:
        """Convolve the two signals using the circulant-matrix method."""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 219 | 1 |
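As a cross-check on the circulant-matrix method above, circular convolution can also be computed in the frequency domain: the DFT of a circular convolution is the pointwise product of the DFTs. A minimal NumPy sketch reproducing [10, 10, 6, 14] for the signals hard-coded above:

import numpy as np

def circular_convolution_fft(a, b):
    """Circular convolution via the convolution theorem: IDFT(DFT(a) * DFT(b))."""
    n = max(len(a), len(b))
    a = np.pad(a, (0, n - len(a)))  # zero-pad both signals to a common length
    b = np.pad(b, (0, n - len(b)))
    return np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))

result = circular_convolution_fft([2, 1, 2, -1], [1, 2, 3, 4])
print(np.round(result, 2))  # expected: [10. 10.  6. 14.]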
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _A ( __magic_name__ ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_snake_case = '''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
@staticmethod
def UpperCAmelCase ( _lowercase :ArgumentParser ):
'''simple docstring'''
lowercase__ = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=__a , required=__a , help="Model\'s type." )
train_parser.add_argument(
"--tf_checkpoint" , type=__a , required=__a , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=__a , required=__a , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=__a , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=__a , default=__a , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=__a )
def __init__( self :Tuple , _lowercase :str , _lowercase :str , _lowercase :str , _lowercase :str , _lowercase :str , *_lowercase :int , ):
'''simple docstring'''
lowercase__ = logging.get_logger("transformers-cli/converting" )
self._logger.info(f'''Loading model {model_type}''' )
lowercase__ = model_type
lowercase__ = tf_checkpoint
lowercase__ = pytorch_dump_output
lowercase__ = config
lowercase__ = finetuning_task_name
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(__a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__a )
if "ckpt" in self._tf_checkpoint.lower():
lowercase__ = self._tf_checkpoint
lowercase__ = ''
else:
lowercase__ = self._tf_checkpoint
lowercase__ = ''
convert_transfo_xl_checkpoint_to_pytorch(
__a , self._config , self._pytorch_dump_output , __a )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__a )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(__a )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 350 |
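The ConvertCommand above follows a common CLI plugin pattern: a static register method wires argparse options to a factory via set_defaults(func=...), and run() dispatches on the stored arguments. A stripped-down, self-contained sketch of that pattern, with hypothetical names and no transformers dependency:

from argparse import ArgumentParser

class GreetCommand:
    """Minimal subcommand following the register/factory/run pattern above."""

    @staticmethod
    def register_subcommand(subparsers) -> None:
        sub = subparsers.add_parser("greet", help="Print a greeting.")
        sub.add_argument("--name", type=str, required=True, help="Who to greet.")
        sub.set_defaults(func=lambda args: GreetCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self) -> None:
        print(f"Hello, {self._name}!")

parser = ArgumentParser("demo-cli")
subparsers = parser.add_subparsers(help="demo-cli commands")
GreetCommand.register_subcommand(subparsers)
args = parser.parse_args(["greet", "--name", "world"])
args.func(args).run()  # prints "Hello, world!"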
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when the partition count is a power of two (a 'perfect' partition)."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the smallest integer whose proportion of perfect partitions
    falls below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(integer)
        integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 201 | 0 |
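The predicate above amounts to asking whether (sqrt(4n + 1) + 1) / 2 is a power of two, which holds exactly when n has the closed form 2^m * (2^m - 1). A quick self-contained sketch verifying that characterization for small m:

import math

def is_perfect(n: int) -> bool:
    # same predicate as check_partition_perfect above
    exponent = math.log2(math.sqrt(4 * n + 1) / 2 + 1 / 2)
    return exponent == int(exponent)

# n = 2^m * (2^m - 1) should satisfy the predicate for every m >= 1 ...
for m in range(1, 10):
    assert is_perfect(2**m * (2**m - 1))

# ... and nothing else should, as the enumeration shows
print([n for n in range(1, 300) if is_perfect(n)])  # [2, 12, 56, 240]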
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components=None):
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(lowerCAmelCase_ , self.__components ) ) + ")"
def __add__( self , _lowerCamelCase ):
lowercase = len(self )
if size == len(lowerCAmelCase_ ):
lowercase = [self.__components[i] + other.component(lowerCAmelCase_ ) for i in range(lowerCAmelCase_ )]
return Vector(lowerCAmelCase_ )
else:
raise Exception('must have the same size' )
def __sub__( self , _lowerCamelCase ):
lowercase = len(self )
if size == len(lowerCAmelCase_ ):
lowercase = [self.__components[i] - other.component(lowerCAmelCase_ ) for i in range(lowerCAmelCase_ )]
return Vector(lowerCAmelCase_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , _lowerCamelCase ):
...
@overload
def __mul__( self , _lowerCamelCase ):
...
def __mul__( self , _lowerCamelCase ):
if isinstance(lowerCAmelCase_ , (float, int) ):
lowercase = [c * other for c in self.__components]
return Vector(lowerCAmelCase_ )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(self ) == len(lowerCAmelCase_ ):
lowercase = len(self )
lowercase = [self.__components[i] * other.component(lowerCAmelCase_ ) for i in range(lowerCAmelCase_ )]
return sum(lowerCAmelCase_ )
else: # error case
raise Exception('invalid operand!' )
def UpperCamelCase_ ( self ):
return Vector(self.__components )
def UpperCamelCase_ ( self , _lowerCamelCase ):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
assert -len(self.__components ) <= pos < len(self.__components )
lowercase = value
def UpperCamelCase_ ( self ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
lowercase = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase_ ) )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase = False ):
lowercase = self * other
lowercase = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector(dimension: int) -> Vector:
    """Return a zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int] , __snake_case : Any ):
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (isinstance(__snake_case , __snake_case ))
lowercase = [0] * dimension
lowercase = 1
return Vector(__snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ):
'''simple docstring'''
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (isinstance(__snake_case , (int, float) ))
)
return x * scalar + y
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
random.seed(__snake_case )
lowercase = [random.randint(__snake_case , __snake_case ) for _ in range(__snake_case )]
return Vector(__snake_case )
class Matrix:
    def __init__(self, matrix, w, h):
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self ):
lowercase = ''''''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , _lowerCamelCase ):
if self.__width == other.width() and self.__height == other.height():
lowercase = []
for i in range(self.__height ):
lowercase = [
self.__matrix[i][j] + other.component(lowerCAmelCase_ , lowerCAmelCase_ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase_ )
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , _lowerCamelCase ):
if self.__width == other.width() and self.__height == other.height():
lowercase = []
for i in range(self.__height ):
lowercase = [
self.__matrix[i][j] - other.component(lowerCAmelCase_ , lowerCAmelCase_ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase_ )
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , _lowerCamelCase ):
...
@overload
def __mul__( self , _lowerCamelCase ):
...
def __mul__( self , _lowerCamelCase ):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): # matrix-vector
if len(lowerCAmelCase_ ) == self.__width:
lowercase = zero_vector(self.__height )
for i in range(self.__height ):
lowercase = [
self.__matrix[i][j] * other.component(lowerCAmelCase_ )
for j in range(self.__width )
]
ans.change_component(lowerCAmelCase_ , sum(lowerCAmelCase_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(lowerCAmelCase_ , (int, float) ): # matrix-scalar
lowercase = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCAmelCase_ , self.__width , self.__height )
return None
def UpperCamelCase_ ( self ):
return self.__height
def UpperCamelCase_ ( self ):
return self.__width
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if 0 <= x < self.__height and 0 <= y < self.__width:
lowercase = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
lowercase = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCAmelCase_ ) ):
lowercase = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCAmelCase_ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCAmelCase_ , lowerCAmelCase_ )
else:
raise Exception('Indices out of bounds' )
def UpperCamelCase_ ( self ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowercase = [
self.__matrix[0][y] * self.cofactor(0 , lowerCAmelCase_ ) for y in range(self.__width )
]
return sum(lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( __snake_case : Any ):
'''simple docstring'''
lowercase = [[0] * n for _ in range(__snake_case )]
return Matrix(__snake_case , __snake_case , __snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : Optional[int] , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
random.seed(__snake_case )
lowercase = [
[random.randint(__snake_case , __snake_case ) for _ in range(__snake_case )] for _ in range(__snake_case )
]
return Matrix(__snake_case , __snake_case , __snake_case )
| 220 |
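The angle method in the Vector class above implements cos(theta) = (x . y) / (|x| |y|). A self-contained check of that formula with plain math, assuming nothing from the class itself:

import math

def norm(v):
    """Euclidean length of v."""
    return math.sqrt(sum(c * c for c in v))

x = [1.0, 0.0]
y = [1.0, 1.0]
dot = sum(a * b for a, b in zip(x, y))  # x . y = 1.0
theta = math.degrees(math.acos(dot / (norm(x) * norm(y))))
print(round(theta, 1))  # 45.0 -- the angle between the two vectors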
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__UpperCamelCase : str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
__UpperCamelCase : ClassVar[Features] = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
__UpperCamelCase : ClassVar[Features] = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
__UpperCamelCase : str = "question"
__UpperCamelCase : str = "context"
__UpperCamelCase : str = "answers"
@property
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 121 | 0 |
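The dataclass above just pins down an input/label schema for extractive QA. A hedged sketch of the equivalent standalone datasets.Features declaration, assuming the datasets library is installed:

from datasets import Features, Sequence, Value

# Input schema: one question and one context string per example.
input_schema = Features({"question": Value("string"), "context": Value("string")})

# Label schema: answer texts with their character start offsets in the context.
label_schema = Features(
    {"answers": Sequence({"text": Value("string"), "answer_start": Value("int32")})}
)

print(input_schema)
print(label_schema)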
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
class a_ ( _snake_case ):
UpperCamelCase__ : Tuple ="AutoTokenizer"
UpperCamelCase__ : Optional[int] =["tokenizer"]
UpperCamelCase__ : List[str] ={
"semantic_prompt": 1,
"coarse_prompt": 2,
"fine_prompt": 2,
}
def __init__( self :Tuple , _lowercase :str , _lowercase :Optional[int]=None) -> Union[str, Any]:
super().__init__(_lowercase)
UpperCAmelCase_ = speaker_embeddings
@classmethod
def __a ( cls :int , _lowercase :Optional[Any] , _lowercase :Union[str, Any]="speaker_embeddings_path.json" , **_lowercase :Dict) -> Union[str, Any]:
if speaker_embeddings_dict_path is not None:
UpperCAmelCase_ = get_file_from_repo(
_lowercase , _lowercase , subfolder=kwargs.pop('''subfolder''' , _lowercase) , cache_dir=kwargs.pop('''cache_dir''' , _lowercase) , force_download=kwargs.pop('''force_download''' , _lowercase) , proxies=kwargs.pop('''proxies''' , _lowercase) , resume_download=kwargs.pop('''resume_download''' , _lowercase) , local_files_only=kwargs.pop('''local_files_only''' , _lowercase) , use_auth_token=kwargs.pop('''use_auth_token''' , _lowercase) , revision=kwargs.pop('''revision''' , _lowercase) , )
if speaker_embeddings_path is None:
logger.warning(
f"`{os.path.join(_lowercase , _lowercase)}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.")
UpperCAmelCase_ = None
else:
with open(_lowercase) as speaker_embeddings_json:
UpperCAmelCase_ = json.load(_lowercase)
else:
UpperCAmelCase_ = None
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_lowercase , **_lowercase)
return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase)
def __a ( self :int , _lowercase :int , _lowercase :Optional[int]="speaker_embeddings_path.json" , _lowercase :Tuple="speaker_embeddings" , _lowercase :bool = False , **_lowercase :int , ) -> Tuple:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_lowercase , _lowercase , '''v2''') , exist_ok=_lowercase)
UpperCAmelCase_ = {}
UpperCAmelCase_ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
UpperCAmelCase_ = self._load_voice_preset(_lowercase)
UpperCAmelCase_ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , _lowercase , f"{prompt_key}_{key}") , voice_preset[key] , allow_pickle=_lowercase , )
UpperCAmelCase_ = os.path.join(_lowercase , f"{prompt_key}_{key}.npy")
UpperCAmelCase_ = tmp_dict
with open(os.path.join(_lowercase , _lowercase) , '''w''') as fp:
json.dump(_lowercase , _lowercase)
super().save_pretrained(_lowercase , _lowercase , **_lowercase)
def __a ( self :int , _lowercase :str = None , **_lowercase :Optional[Any]) -> str:
UpperCAmelCase_ = self.speaker_embeddings[voice_preset]
UpperCAmelCase_ = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].")
UpperCAmelCase_ = get_file_from_repo(
self.speaker_embeddings.get('''repo_or_path''' , '''/''') , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , _lowercase) , cache_dir=kwargs.pop('''cache_dir''' , _lowercase) , force_download=kwargs.pop('''force_download''' , _lowercase) , proxies=kwargs.pop('''proxies''' , _lowercase) , resume_download=kwargs.pop('''resume_download''' , _lowercase) , local_files_only=kwargs.pop('''local_files_only''' , _lowercase) , use_auth_token=kwargs.pop('''use_auth_token''' , _lowercase) , revision=kwargs.pop('''revision''' , _lowercase) , )
if path is None:
raise ValueError(
f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/') , voice_preset_paths[key])}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.")
UpperCAmelCase_ = np.load(_lowercase)
return voice_preset_dict
def __a ( self :Tuple , _lowercase :Optional[dict] = None) -> List[str]:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")
if not isinstance(voice_preset[key] , np.ndarray):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
if len(voice_preset[key].shape) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")
def __call__( self :Tuple , _lowercase :List[str]=None , _lowercase :Dict=None , _lowercase :Optional[int]="pt" , _lowercase :Optional[int]=256 , _lowercase :Dict=False , _lowercase :Dict=True , _lowercase :str=False , **_lowercase :Union[str, Any] , ) -> Tuple:
if voice_preset is not None and not isinstance(_lowercase , _lowercase):
if (
isinstance(_lowercase , _lowercase)
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
UpperCAmelCase_ = self._load_voice_preset(_lowercase)
else:
if isinstance(_lowercase , _lowercase) and not voice_preset.endswith('''.npz'''):
UpperCAmelCase_ = voice_preset + '''.npz'''
UpperCAmelCase_ = np.load(_lowercase)
if voice_preset is not None:
self._validate_voice_preset_dict(_lowercase , **_lowercase)
UpperCAmelCase_ = BatchFeature(data=_lowercase , tensor_type=_lowercase)
UpperCAmelCase_ = self.tokenizer(
_lowercase , return_tensors=_lowercase , padding='''max_length''' , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
if voice_preset is not None:
UpperCAmelCase_ = voice_preset
return encoded_text
| 344 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "spiece.model"}
UpperCamelCase_ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
UpperCamelCase_ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
UpperCamelCase_ = 0
UpperCamelCase_ = 1
UpperCamelCase_ = 2
UpperCamelCase_ = 3
UpperCamelCase_ = 4
class a_ ( _snake_case ):
UpperCamelCase__ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase__ : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : Any ="left"
def __init__( self :Optional[int] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=True , _lowercase :Union[str, Any]=False , _lowercase :Tuple="<s>" , _lowercase :Any="</s>" , _lowercase :Dict="<unk>" , _lowercase :str="<sep>" , _lowercase :Tuple="<pad>" , _lowercase :Any="<cls>" , _lowercase :List[str]="<mask>" , _lowercase :Union[str, Any]=["<eop>", "<eod>"] , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Union[str, Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase) if isinstance(_lowercase , _lowercase) else mask_token
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , remove_space=_lowercase , keep_accents=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , additional_special_tokens=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
UpperCAmelCase_ = 3
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = remove_space
UpperCAmelCase_ = keep_accents
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowercase)
@property
def __a ( self :int) -> List[Any]:
return len(self.sp_model)
def __a ( self :Optional[int]) -> List[Any]:
UpperCAmelCase_ = {self.convert_ids_to_tokens(_lowercase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self :Dict) -> Union[str, Any]:
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Optional[Any]) -> List[Any]:
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __a ( self :List[str] , _lowercase :Tuple) -> Optional[int]:
if self.remove_space:
UpperCAmelCase_ = ''' '''.join(inputs.strip().split())
else:
UpperCAmelCase_ = inputs
UpperCAmelCase_ = outputs.replace('''``''' , '''"''').replace('''\'\'''' , '''"''')
if not self.keep_accents:
UpperCAmelCase_ = unicodedata.normalize('''NFKD''' , _lowercase)
UpperCAmelCase_ = ''''''.join([c for c in outputs if not unicodedata.combining(_lowercase)])
if self.do_lower_case:
UpperCAmelCase_ = outputs.lower()
return outputs
def __a ( self :str , _lowercase :str) -> List[str]:
UpperCAmelCase_ = self.preprocess_text(_lowercase)
UpperCAmelCase_ = self.sp_model.encode(_lowercase , out_type=_lowercase)
UpperCAmelCase_ = []
for piece in pieces:
if len(_lowercase) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
UpperCAmelCase_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowercase , ''''''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
UpperCAmelCase_ = cur_pieces[1:]
else:
UpperCAmelCase_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_lowercase)
else:
new_pieces.append(_lowercase)
return new_pieces
def __a ( self :Optional[Any] , _lowercase :Union[str, Any]) -> Tuple:
return self.sp_model.PieceToId(_lowercase)
def __a ( self :Optional[int] , _lowercase :Optional[Any]) -> List[str]:
return self.sp_model.IdToPiece(_lowercase)
def __a ( self :List[Any] , _lowercase :Optional[Any]) -> int:
UpperCAmelCase_ = ''''''.join(_lowercase).replace(_lowercase , ''' ''').strip()
return out_string
def __a ( self :Union[str, Any] , _lowercase :List[int] , _lowercase :bool = False , _lowercase :bool = None , _lowercase :bool = True , **_lowercase :Tuple , ) -> str:
UpperCAmelCase_ = kwargs.pop('''use_source_tokenizer''' , _lowercase)
UpperCAmelCase_ = self.convert_ids_to_tokens(_lowercase , skip_special_tokens=_lowercase)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
UpperCAmelCase_ = []
sub_texts.append(_lowercase)
else:
current_sub_text.append(_lowercase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowercase))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
UpperCAmelCase_ = ''''''.join(_lowercase)
UpperCAmelCase_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase_ = self.clean_up_tokenization(_lowercase)
return clean_text
else:
return text
def __a ( self :str , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __a ( self :Dict , _lowercase :List[int] , _lowercase :Optional[List[int]] = None , _lowercase :bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase)
if token_ids_a is not None:
return ([0] * len(_lowercase)) + [1] + ([0] * len(_lowercase)) + [1, 1]
return ([0] * len(_lowercase)) + [1, 1]
def __a ( self :Optional[int] , _lowercase :List[int] , _lowercase :Optional[List[int]] = None) -> List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def __a ( self :str , _lowercase :str , _lowercase :Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_lowercase):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
UpperCAmelCase_ = os.path.join(
_lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowercase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowercase)
elif not os.path.isfile(self.vocab_file):
with open(_lowercase , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowercase)
return (out_vocab_file,)
| 344 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["image_processor", "tokenizer"]
snake_case__ = "Pix2StructImageProcessor"
snake_case__ = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : int ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Union[str, Any] ):
UpperCAmelCase__ = False
super().__init__(lowerCamelCase__ ,lowerCamelCase__ )
def __call__( self : Optional[int] ,lowerCamelCase__ : Optional[int]=None ,lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False ,lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = 2_048 ,lowerCamelCase__ : int = 0 ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : bool = True ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCamelCase__ : Union[str, Any] ,):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCAmelCase__ = self.tokenizer
UpperCAmelCase__ = self.tokenizer(
text=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,stride=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_overflowing_tokens=lowerCamelCase__ ,return_special_tokens_mask=lowerCamelCase__ ,return_offsets_mapping=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,return_length=lowerCamelCase__ ,verbose=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCAmelCase__ = self.image_processor(
lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,max_patches=lowerCamelCase__ ,**lowerCamelCase__ )
else:
# add pixel_values and bbox
UpperCAmelCase__ = self.image_processor(
lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,max_patches=lowerCamelCase__ ,header_text=lowerCamelCase__ ,**lowerCamelCase__ )
if text is not None and not self.image_processor.is_vqa:
UpperCAmelCase__ = self.tokenizer(
text=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,padding=lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=lowerCamelCase__ ,stride=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_overflowing_tokens=lowerCamelCase__ ,return_special_tokens_mask=lowerCamelCase__ ,return_offsets_mapping=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,return_length=lowerCamelCase__ ,verbose=lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,**lowerCamelCase__ ,)
if "attention_mask" in text_encoding:
UpperCAmelCase__ = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
UpperCAmelCase__ = text_encoding.pop('input_ids' )
else:
UpperCAmelCase__ = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def __lowerCAmelCase ( self : Optional[int] ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Union[str, Any] ):
return self.tokenizer.batch_decode(*lowerCamelCase__ ,**lowerCamelCase__ )
def __lowerCAmelCase ( self : str ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Tuple ):
return self.tokenizer.decode(*lowerCamelCase__ ,**lowerCamelCase__ )
@property
def __lowerCAmelCase ( self : Dict ):
UpperCAmelCase__ = self.tokenizer.model_input_names
UpperCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 98 |
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary representation as a '0b'-prefixed string."""
    if isinstance(num, float):
        raise TypeError("""'float' object cannot be interpreted as an integer""")
    if isinstance(num, str):
        raise TypeError("""'str' object cannot be interpreted as an integer""")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 163 | 0 |
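A quick way to sanity-check a converter like the one above is to compare it against Python's built-in bin(). A minimal sketch, re-declaring the divide-and-append idea locally so the check is self-contained:

def dec_to_bin(num: int) -> str:
    # divide-and-append, same idea as the function above
    if num == 0:
        return "0b0"
    sign, num = ("-" if num < 0 else ""), abs(num)
    bits = []
    while num > 0:
        bits.append(str(num % 2))
        num >>= 1
    return sign + "0b" + "".join(reversed(bits))

# cross-check against the built-in for a range of values
for n in range(-300, 300):
    assert dec_to_bin(n) == bin(n), n
print("matches bin() on [-300, 300)")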
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
_a = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Tuple, *UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Dict=None, UpperCAmelCase__ : int=None, UpperCAmelCase__ : Tuple=None, **UpperCAmelCase__ : List[Any] ):
super().__init__(*UpperCAmelCase__, **UpperCAmelCase__ )
__lowercase = eval_examples
__lowercase = post_process_function
__lowercase = quant_trainer_args
__lowercase = 1_2_8 # default number of calibration samples
def _lowercase ( self : Tuple, UpperCAmelCase__ : List[str]=None ):
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("Trainer: calibration requires an calib_dataset." )
__lowercase = calib_dataset if calib_dataset is not None else self.calib_dataset
__lowercase = self._remove_unused_columns(UpperCAmelCase__, description="Calibration" )
return DataLoader(
UpperCAmelCase__, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory, shuffle=UpperCAmelCase__, )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[int]=None ):
__lowercase = self.train_dataset if calib_dataset is None else calib_dataset
__lowercase = self.get_calib_dataloader(UpperCAmelCase__ )
__lowercase = self.model
quant_trainer.configure_model(UpperCAmelCase__, self.quant_trainer_args, calib=UpperCAmelCase__ )
model.eval()
quant_trainer.enable_calibration(UpperCAmelCase__ )
logger.info("***** Running calibration *****" )
logger.info(F""" Num examples = {self.calib_num}""" )
logger.info(F""" Batch size = {calib_dataloader.batch_size}""" )
for step, inputs in enumerate(UpperCAmelCase__ ):
# Prediction step
__lowercase ,__lowercase ,__lowercase = self.prediction_step(UpperCAmelCase__, UpperCAmelCase__, prediction_loss_only=UpperCAmelCase__ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(UpperCAmelCase__, self.quant_trainer_args )
__lowercase = model
def _lowercase ( self : List[Any], UpperCAmelCase__ : Tuple=None, UpperCAmelCase__ : List[Any]=None, UpperCAmelCase__ : Dict=None, UpperCAmelCase__ : str = "eval" ):
__lowercase = self.eval_dataset if eval_dataset is None else eval_dataset
__lowercase = self.get_eval_dataloader(UpperCAmelCase__ )
__lowercase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase = self.compute_metrics
__lowercase = None
__lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase = eval_loop(
UpperCAmelCase__, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=UpperCAmelCase__, )
finally:
__lowercase = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
__lowercase = self.post_process_function(UpperCAmelCase__, UpperCAmelCase__, output.predictions )
__lowercase = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__lowercase = metrics.pop(UpperCAmelCase__ )
self.log(UpperCAmelCase__ )
else:
__lowercase = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__lowercase = self.callback_handler.on_evaluate(self.args, self.state, self.control, UpperCAmelCase__ )
return metrics
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Optional[int]=None, UpperCAmelCase__ : str = "test" ):
__lowercase = self.get_test_dataloader(UpperCAmelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase = self.compute_metrics
__lowercase = None
__lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase = eval_loop(
UpperCAmelCase__, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=UpperCAmelCase__, )
finally:
__lowercase = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
__lowercase = self.post_process_function(UpperCAmelCase__, UpperCAmelCase__, output.predictions, "predict" )
__lowercase = self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__lowercase = metrics.pop(UpperCAmelCase__ )
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=UpperCAmelCase__ )
def _lowercase ( self : List[str], UpperCAmelCase__ : Optional[int]="./" ):
__lowercase = self.eval_dataset
__lowercase = self.get_eval_dataloader(UpperCAmelCase__ )
__lowercase = next(iter(UpperCAmelCase__ ) )
# saving device - to make it consistent
__lowercase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
# convert to tuple
__lowercase = tuple(v.to(UpperCAmelCase__ ) for k, v in batch.items() )
logger.info("Converting model to be onnx compatible" )
from pytorch_quantization.nn import TensorQuantizer
__lowercase = True
__lowercase = self.model.to(UpperCAmelCase__ )
model.eval()
model.float()
__lowercase = model.module if hasattr(UpperCAmelCase__, "module" ) else model
quant_trainer.configure_model(UpperCAmelCase__, self.quant_trainer_args )
__lowercase = os.path.join(UpperCAmelCase__, "model.onnx" )
logger.info(F"""exporting model to {output_model_file}""" )
__lowercase = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, export_params=UpperCAmelCase__, opset_version=1_3, do_constant_folding=UpperCAmelCase__, input_names=["input_ids", "attention_mask", "token_type_ids"], output_names=["output_start_logits", "output_end_logits"], dynamic_axes={
"input_ids": axes,
"attention_mask": axes,
"token_type_ids": axes,
"output_start_logits": axes,
"output_end_logits": axes,
}, verbose=UpperCAmelCase__, )
logger.info("onnx export finished" )
| 353 |
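The save_onnx method above boils down to a torch.onnx.export call with dynamic batch and sequence axes. A minimal sketch of that export for a toy model, assuming only that PyTorch is installed; the Linear layer and file name are illustrative stand-ins, not the QA model above:

import torch

model = torch.nn.Linear(16, 4)   # toy stand-in for the quantized QA model
model.eval()
dummy_input = torch.randn(2, 16)

axes = {0: "batch_size"}         # mark dim 0 as dynamic, like the snippet above
torch.onnx.export(
    model,
    dummy_input,
    "toy_model.onnx",
    export_params=True,
    do_constant_folding=True,
    input_names=["input"],
    output_names=["output"],
    dynamic_axes={"input": axes, "output": axes},
)
print("exported toy_model.onnx")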
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 144 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
a__ : List[str] = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
a__ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 54 |
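The _LazyModule machinery above defers submodule imports until an attribute is first touched; PEP 562's module-level __getattr__ gives the same effect in plain Python. A hypothetical sketch of such a package __init__.py (the package and submodule names are assumptions for illustration):

# mypackage/__init__.py -- hypothetical lazy-loading sketch
import importlib

_SUBMODULES = {"heavy_tool": ".heavy_tool"}  # public name -> relative module path

def __getattr__(name):
    """Import a submodule only on first attribute access (PEP 562)."""
    if name in _SUBMODULES:
        module = importlib.import_module(_SUBMODULES[name], __name__)
        globals()[name] = module  # cache so later lookups skip __getattr__
        return module
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

def __dir__():
    return sorted(list(globals()) + list(_SUBMODULES))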
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of value, or its derivative s * (1 - s) when deriv is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after number_propagations rounds of training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : List[str] = int(input('''Expected value: '''))
a__ : str = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
| 54 | 1 |
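One subtlety in the update above: sigmoid_function(layer_1, True) works because for s = sigmoid(x), the derivative with respect to x equals s * (1 - s). A quick numerical check of that identity with a central difference:

import math

def sigmoid(x: float) -> float:
    return 1 / (1 + math.exp(-x))

for x in (-2.0, -0.5, 0.0, 1.5):
    s = sigmoid(x)
    h = 1e-6
    numeric = (sigmoid(x + h) - sigmoid(x - h)) / (2 * h)  # central difference
    assert abs(numeric - s * (1 - s)) < 1e-8, x
print("sigmoid'(x) == s * (1 - s) verified numerically")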
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Dict , __UpperCAmelCase : Union[str, Any]):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
a : Optional[int] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(_lowerCamelCase)
def __snake_case ( self : Optional[int]):
a : int = "sshleifer/tiny-gpt2"
a : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
a : int = PyTorchBenchmark(_lowerCamelCase)
a : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : int):
a : List[str] = "sgugger/tiny-distilbert-classification"
a : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , only_pretrain_model=_lowerCamelCase , )
a : List[Any] = PyTorchBenchmark(_lowerCamelCase)
a : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : Any):
a : Any = "sshleifer/tiny-gpt2"
a : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , torchscript=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
a : Union[str, Any] = PyTorchBenchmark(_lowerCamelCase)
a : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def __snake_case ( self : Union[str, Any]):
a : Tuple = "sshleifer/tiny-gpt2"
a : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , fpaa=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
a : Optional[int] = PyTorchBenchmark(_lowerCamelCase)
a : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : Union[str, Any]):
a : str = "sshleifer/tiny-gpt2"
a : Union[str, Any] = AutoConfig.from_pretrained(_lowerCamelCase)
# set architectures equal to `None`
a : int = None
a : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
a : Optional[int] = PyTorchBenchmark(_lowerCamelCase , configs=[config])
a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : str):
a : Optional[Any] = "sshleifer/tiny-gpt2"
a : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
a : Union[str, Any] = PyTorchBenchmark(_lowerCamelCase)
a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can\'t do half precision")
def __snake_case ( self : int):
a : Tuple = "sshleifer/tiny-gpt2"
a : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_lowerCamelCase , multi_process=_lowerCamelCase , )
a : Union[str, Any] = PyTorchBenchmark(_lowerCamelCase)
a : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def __snake_case ( self : List[str]):
a : Dict = "sshleifer/tiny-gpt2"
a : Optional[int] = AutoConfig.from_pretrained(_lowerCamelCase)
a : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
a : List[str] = PyTorchBenchmark(_lowerCamelCase , configs=[config])
a : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : List[str]):
a : Any = "sshleifer/tinier_bart"
a : List[Any] = AutoConfig.from_pretrained(_lowerCamelCase)
a : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
a : List[Any] = PyTorchBenchmark(_lowerCamelCase , configs=[config])
a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def __snake_case ( self : Dict):
a : Union[str, Any] = "sshleifer/tiny-gpt2"
a : List[str] = AutoConfig.from_pretrained(_lowerCamelCase)
a : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
a : List[str] = PyTorchBenchmark(_lowerCamelCase , configs=[config])
a : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def __snake_case ( self : Optional[Any]):
a : str = "sshleifer/tinier_bart"
a : int = AutoConfig.from_pretrained(_lowerCamelCase)
a : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_lowerCamelCase , inference=_lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_lowerCamelCase , )
a : Any = PyTorchBenchmark(_lowerCamelCase , configs=[config])
a : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False, )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 368 |
"""simple docstring"""
import sys
import turtle
def get_mid(p1, p2) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth) -> None:
    """Draw the triangle's outline, then recurse on its three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
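
# Added illustration (not part of the original script): each triangle() call
# draws one outline and then recurses three times, so a run at depth d draws
# 3**0 + 3**1 + ... + 3**d = (3**(d + 1) - 1) // 2 triangles in total.
def count_triangles(depth: int) -> int:
    return sum(3**k for k in range(depth + 1))


assert count_triangles(2) == 13  # 1 + 3 + 9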
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 226 | 0 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
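
# Worked example (added): rename_key maps checkpoint keys to HF VideoMAE names,
# e.g. "blocks.0.attn.proj.weight"
#   -> "videomae.encoder.layer.0.attention.output.dense.weight".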
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
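
# Shape note (added): a fused "qkv" weight has shape (3 * dim, dim); the three
# equal slices val[:dim], val[dim : dim * 2] and val[-dim:] above become the
# separate query, key and value projection weights.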
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
a__ : Dict = torch.Size([1, 400] )
a__ : Optional[Any] = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
a__ : int = torch.Size([1, 174] )
a__ : List[str] = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
a__ : Dict = torch.Size([1, 1408, 1536] )
a__ : Union[str, Any] = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
a__ : Any = torch.Size([1, 1408, 1536] )
a__ : Union[str, Any] = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
a__ : List[str] = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
a__ : Optional[Any] = torch.Size([1, 1408, 1536] )
a__ : str = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
a__ : Optional[Any] = torch.Size([1, 400] )
a__ : Optional[int] = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
a__ : int = torch.Size([1, 400] )
a__ : Tuple = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
a__ : Dict = torch.Size([1, 400] )
a__ : Dict = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
a__ : Dict = torch.Size([1, 400] )
a__ : int = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
a__ : int = torch.Size([1, 1408, 1536] )
a__ : Dict = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
a__ : int = torch.Size([1, 174] )
a__ : Optional[int] = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
a__ : Dict = torch.Size([1, 1408, 1536] )
a__ : Dict = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
a__ : str = torch.Size([1, 174] )
a__ : Tuple = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(F'Model name not supported. Should be one of {model_names}' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
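
# Example invocation (added; assumes this script is saved as
# convert_videomae_to_pytorch.py — the flags below are defined by the argparse
# section that follows):
#   python convert_videomae_to_pytorch.py --model_name videomae-base \
#       --pytorch_dump_folder_path ./videomae-base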
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 99 |
"""simple docstring"""
from math import isqrt, loga
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count prime pairs (p, q), p < q, with q*log2(p) + p*log2(q) <= degree*log2(base)."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
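
# Worked illustration (added): C(p, q) = p**q * q**p stays within base**degree
# exactly when q * log2(p) + p * log2(q) <= degree * log2(base), which is the
# comparison the two-pointer loop performs. For example C(2, 3) = 8 * 9 = 72,
# and 3 * log2(2) + 2 * log2(3) = log2(72) ≈ 6.17.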
if __name__ == "__main__":
print(F"{solution() = }")
| 136 | 0 |
def get_set_bits_count(number: int) -> int:
    """
    Count set bits with Brian Kernighan's trick: `number &= number - 1` clears
    the lowest set bit, so the loop runs once per set bit instead of once per
    bit position (i.e. not 32 times, only the number of `1` bits).

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        number &= number - 1
        count += 1
    return count
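
# Cross-check sketch (added, not part of the original): Python's bin() gives
# the same answer in one line, which is handy for validating the loop above.
def get_set_bits_count_naive(number: int) -> int:
    return bin(number).count("1")


assert get_set_bits_count_naive(25) == 3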
if __name__ == "__main__":
import doctest
doctest.testmod() | 196 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings, )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes) | 196 | 1 |
"""simple docstring"""
from math import ceil
def __A ( a_ :int = 10_01) -> int:
__a : List[str] = 1
for i in range(1 , int(ceil(n / 2.0))):
__a : int = 2 * i + 1
__a : Union[str, Any] = 2 * i
__a : Any = total + 4 * odd**2 - 6 * even
return total
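
# Sanity check (added): for a 5 x 5 spiral the diagonals hold
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.
assert solution(5) == 101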
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''') | 160 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" T5 tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
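    # Illustration (added): with a single sequence these helpers simply append
    # EOS, e.g. build_inputs_with_special_tokens([37, 423]) -> [37, 423, eos_id],
    # and the token type ids are all zeros because T5 does not use them.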
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)), self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()] | 160 | 1 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 360 |
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line,
            # just insert overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
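
# Example run (added): wrapping to width 16 yields fully justified lines,
# with the last line left-justified and padded:
# text_justification("This is an example of text justification.", 16)
# -> ['This    is    an', 'example  of text', 'justification.  ']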
if __name__ == "__main__":
from doctest import testmod
testmod()
| 223 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed)
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", )
# Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
| 87 |
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
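
# Added check: below 10 the multiples of 3 or 5 are 3, 5, 6 and 9, summing to 23.
assert solution(10) == 23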
if __name__ == "__main__":
print(F"{solution() = }") | 312 | 0 |
"""simple docstring"""
def euclidean_gcd ( a , b ):
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive ( a , b ):
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main ( ):
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 255 |
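The loop above terminates with the right answer because gcd(a, b) is invariant under (a, b) -> (b, a % b) and the second argument strictly decreases. A quick illustrative check:
assert euclidean_gcd(54, 24) == euclidean_gcd(24, 54 % 24) == 6
assert euclidean_gcd_recursive(54, 24) == 6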
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}." )
    if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" , None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type="""TPU""" )
        print(f"Launching a training on {num_processes} TPU cores." )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
        function(*args )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr="""127.0.0.1""" , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type="""MULTI_GPU""" )
                print(f"Launching training on {num_processes} GPUs." )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
__SCREAMING_SNAKE_CASE = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
            function(*args )
def debug_launcher( function , args=() , num_processes=2 ):
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes , master_addr="""127.0.0.1""" , master_port="""29500""" , accelerate_mixed_precision="""no""" , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu="""yes""" , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method="""fork""" )
| 255 | 1 |
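A minimal usage sketch for the launcher above. `training_function` is a hypothetical user-defined function; it must build its own `Accelerator` and must not touch CUDA before launch:
def training_function():
    ...  # create the Accelerator, model and dataloaders here

notebook_launcher(training_function, args=(), num_processes=2, mixed_precision="fp16")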
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : List[Any] = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 236 |
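For context, the `_LazyModule` pattern above defers importing heavy submodules until one of their attributes is first accessed. A stripped-down sketch of the idea (not the actual transformers implementation):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # import the defining submodule only on first access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)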
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids )
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text: str ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 236 | 1 |
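For reference, the RoBERTa-style special-token layout that `build_inputs_with_special_tokens` above produces is:
# single sequence:   <s> X </s>
# pair of sequences: <s> A </s></s> B </s>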
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
SCREAMING_SNAKE_CASE__ = ["""gpt2"""]
SCREAMING_SNAKE_CASE__ = """gpt2"""
if is_tf_available():
    class ModelToSave(tf.Module ):
        def __init__( self , tokenizer ):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPT2LMHeadModel.from_config(config )
        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="text" ),) )
        def serving( self , text ):
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 , tf.int32 )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense , attention_mask=input_mask )["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase ):
    def setUp( self ):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_output_equivalence( self ):
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] , return_tensors="tf" )
                tf_outputs = tf_tokenizer([test_inputs] )
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values , tf.int64 ) == tf_outputs_values ) )
    @slow
    def test_graph_mode( self ):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
    @slow
    def test_saved_model( self ):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / "saved.model"
                tf.saved_model.save(model , save_path , signatures={"serving_default": model.serving} )
                loaded_model = tf.saved_model.load(save_path )
                loaded_output = loaded_model.signatures["serving_default"](test_inputs )["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )
    @slow
    def test_from_config( self ):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
    @slow
    def test_padding( self ):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs , max_length=max_length )
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
| 297 |
def hubble_parameter( hubble_constant: float , radiation_density: float , matter_density: float , dark_energy: float , redshift: float , ) -> float:
    '''Return the Hubble parameter H(z) for the given density parameters.'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("All input parameters must be positive" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("Relative densities cannot be greater than one" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 297 | 1 |
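For reference, the quantity the snippet computes is the standard Friedmann expansion rate,

E(z)^2 = \Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda, \qquad H(z) = H_0 \, E(z),

with the curvature density fixed by \Omega_k = 1 - (\Omega_m + \Omega_r + \Omega_\Lambda).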
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
__UpperCAmelCase = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(SCREAMING_SNAKE_CASE ) )
return round(SCREAMING_SNAKE_CASE , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 |
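A small worked example for the present-value snippet above (illustrative only): at a 13% discount rate, 10/1.13^0 + 20.70/1.13^1 - 293/1.13^2 + 297/1.13^3 ≈ 4.69.
assert present_value(0.13, [10, 20.70, -293, 297]) == 4.69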
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
'''simple docstring'''
__UpperCAmelCase = len(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = [[0] * n for i in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = y_points[i]
for i in range(2 , SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 1 |
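A quick usage check for the Neville snippet above (illustrative; the points lie on the line y = x + 5, so the interpolant evaluated at 5 is exactly 10):
assert neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10.0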
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the original dummy class names were not preserved in this dump; the names below follow
# diffusers' dummy_flax_and_transformers_objects.py and should be treated as a reconstruction.
class FlaxStableDiffusionControlNetPipeline( metaclass=DummyObject ):
    _backends = ["flax", "transformers"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
class FlaxStableDiffusionImg2ImgPipeline( metaclass=DummyObject ):
    _backends = ["flax", "transformers"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
class FlaxStableDiffusionInpaintPipeline( metaclass=DummyObject ):
    _backends = ["flax", "transformers"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
class FlaxStableDiffusionPipeline( metaclass=DummyObject ):
    _backends = ["flax", "transformers"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["flax", "transformers"] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["flax", "transformers"] )
| 357 |
'''simple docstring'''
import functools
def min_distance_up_bottom( word1: str , word2: str ) -> int:
    '''Return the Levenshtein edit distance between word1 and word2 (top-down DP with memoization).'''
    len_word1 = len(word1 )
    len_word2 = len(word2 )
    @functools.cache
    def min_distance(index1: int , index2: int ) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2] )  # current letters not identical
        return min(
            1 + min_distance(index1 + 1 , index2 ) , 1 + min_distance(index1 , index2 + 1 ) , diff + min_distance(index1 + 1 , index2 + 1 ) , )
    return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 8 | 0 |
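A quick usage check for the edit-distance snippet above (illustrative; "intention" -> "execution" is the classic five-operation example):
assert min_distance_up_bottom("intention", "execution") == 5
assert min_distance_up_bottom("", "abc") == 3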
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    '''Implements a batched, differentiable, standard pinhole camera.'''
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__( self ):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution( self ):
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov( self ):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords( self ) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode="trunc" ),
            ] , axis=1 , )
        return coords
    @property
    def camera_rays( self ):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def get_camera_rays( self , coords: torch.Tensor ) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def resize_image( self , width: int , height: int ) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , shape=self.shape , )
def create_pan_cameras( size: int ) -> DifferentiableProjectiveCamera:
    '''Create a ring of 20 cameras panning around the origin.'''
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z , x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0)).float() , x=torch.from_numpy(np.stack(xs , axis=0)).float() , y=torch.from_numpy(np.stack(ys , axis=0)).float() , z=torch.from_numpy(np.stack(zs , axis=0)).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs)) , )
| 232 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='diffusers-cli command helpers' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 240 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , num_labels=10 , initializer_range=0.02 , attention_type="divided_space_time" , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        config = TimesformerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model( self , config , pixel_values , labels ):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_video_classification( self , config , pixel_values , labels ):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape , expected_shape)
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as TimeSformer does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self , config_class=TimesformerConfig , has_text_modality=False , hidden_size=37)
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
        return inputs_dict
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='TimeSformer does not use inputs_embeds')
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_video_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs( self ):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions) , self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
                out_len = len(outputs)
                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict , model_class))
                self.assertEqual(out_len + 1 , len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions) , self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states) , expected_num_layers)
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
"""simple docstring"""
a : Dict = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
a : Tuple = np.load(snake_case )
return list(snake_case )
@require_torch
@require_vision
class TimesformerModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification( self ):
        model = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400').to(
            torch_device)
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8] , return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
| 352 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase : int = """true"""
def get_basic_setup( accelerator , num_samples=82 , batch_size=16 ) -> Union[str, Any]:
    """Returns everything needed to perform basic training"""
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader( accelerator: Accelerator , use_longest=False ) -> Optional[int]:
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
    dataset = load_dataset('glue' , 'mrpc' , split='validation' )
    def tokenize_function(examples ):
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
        return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup( dispatch_batches , split_batches ) -> Union[str, Any]:
    """simple docstring"""
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased' , return_dict=True )
    ddp_model, ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions( model , dataloader , accelerator ) -> Optional[int]:
    """simple docstring"""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit, target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits, targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics( accelerator: Accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ) -> Optional[int]:
    """simple docstring"""
    model, ddp_model, dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits, targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), F"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}"""
def test_mrpc( dispatch_batches: bool = False , split_batches: bool = False ) -> List[str]:
    """simple docstring"""
    metric = evaluate.load('glue' , 'mrpc' )
    setup, accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch['labels'] )
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
a : Dict = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(snake_case , snake_case )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a : List[Any] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(snake_case , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
a : Optional[Any] = Accelerator()
test_torch_metrics(snake_case , 512 )
accelerator.state._reset_state()
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 345 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"""{len(upper_files)} files contain uppercase characters:""")
    print('\n'.join(upper_files) + '\n')
space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(F"""{len(space_files)} files contain space characters:""")
    print('\n'.join(space_files) + '\n')
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(F"""{len(hyphen_files)} files contain hyphen characters:""")
    print('\n'.join(hyphen_files) + '\n')
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"""{len(nodir_files)} files are not in a directory:""")
    print('\n'.join(nodir_files) + '\n')
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 333 |
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
'''simple docstring'''
__UpperCAmelCase = len(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = [[0] * n for i in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = y_points[i]
for i in range(2 , SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict( self ):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self ):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("RGB" )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = Pix2StructImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_convert_rgb" ) )
    def test_expected_patches( self ):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict )
        max_patch = 2048
        inputs = image_processor(dummy_image , return_tensors="pt" , max_patches=max_patch )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
    def test_call_pil( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_vqa( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError ):
                encoded_images = image_processor(
                    image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            header_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch , header_text=header_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch , header_text=header_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_numpy( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_pytorch( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = Pix2StructImageProcessingTester(self , num_channels=4 )
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_convert_rgb" ) )
    def test_call_pil( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors="pt" , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 346 |
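# For orientation, a minimal usage sketch of the processor these tests exercise;
# the patch size and max_patches values are illustrative assumptions, not
# fixtures taken from the suite above.
from PIL import Image
from transformers import Pix2StructImageProcessor

processor = Pix2StructImageProcessor(patch_size={"height": 16, "width": 16})
image = Image.new("RGB", (256, 256))
patches = processor(image, return_tensors="pt", max_patches=512).flattened_patches
# hidden dim = 16 * 16 * 3 + 2: each flattened RGB patch plus its (row, col) ids
print(patches.shape)  # torch.Size([1, 512, 770])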
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 346 | 1 |
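# A quick sanity check of the trial-division approach, assuming the `solution`
# function defined in the snippet above (13195 = 5 * 7 * 13 * 29):
assert solution(13195) == 29
assert solution() == 6857  # the known answer for 600851475143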
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride)
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
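# What the integration test above exercises, outside of unittest — a hedged
# sketch reusing the same checkpoint and the prepare_img helper:
model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
image_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
inputs = image_processor(images=prepare_img(), return_tensors="tf")
outputs = model(**inputs)
print(outputs.logits.shape)  # (1, 1000), one logit per ImageNet class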
| 258 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney")
    def _np_extract_fbank_features(self, waveform):
        """Compute the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10")
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None, return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None, do_normalize=None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize)
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value)
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 272 | 0 |
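# Minimal usage sketch for the feature extractor above (values are defaults from
# the class, not from a pretrained checkpoint): one second of silence is padded
# to 30 s and mapped to an 80-bin log-mel spectrogram with 10 ms frames.
import numpy as np

extractor = WhisperFeatureExtractor()
audio = np.zeros(16000, dtype=np.float32)
features = extractor(audio, sampling_rate=16000, return_tensors="np")
print(features["input_features"].shape)  # (1, 80, 3000)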
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 351 |
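# A simplified sketch of the deferred-import idea behind `_LazyModule` above
# (an illustration, not the real transformers implementation): attribute access
# triggers the actual submodule import on first use.
import importlib


class LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self._name)
        return getattr(module, attr)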
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match")
| 327 | 0 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 85 |
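# Sanity check against a known date, assuming the module context above:
# Easter 2000 fell on Sunday, April 23.
assert gauss_easter(2000) == datetime(2000, 4, 23)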
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Solve F = (ħ * c * π² * A) / (240 * d⁴) for whichever of the three
    quantities is passed as zero, and return it in a dict.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 | 0 |
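# Worked example for the solver above: the attractive Casimir force between two
# ideal 1 m^2 plates separated by 1 micrometre.
result = casimir_force(force=0, area=1.0, distance=1e-6)
print(result)  # {'force': ~1.3e-3} newtons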
'''simple docstring'''
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 351 |
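# Round-trip check for the columnar transposition above: decrypting an
# encrypted message with the same key recovers the plaintext.
ciphertext = encrypt_message(8, "Common sense is not so common.")
assert decrypt_message(8, ciphertext) == "Common sense is not so common."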
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 322 | 0 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, keep_aspect_ratio=False, ensure_multiple_of=1, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, keep_aspect_ratio=False, ensure_multiple_of=1, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, keep_aspect_ratio=None, ensure_multiple_of=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 190 |
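# Worked example for the aspect-preserving, multiple-constrained resize above:
# a 480x640 input targeted at 384x384 keeps the height scale (0.8, closer to 1)
# for both sides, then rounds each side to a multiple of 32.
new_height, new_width = get_resize_output_image_size(
    np.zeros((3, 480, 640)), output_size=(384, 384), keep_aspect_ratio=True, multiple=32
)
print(new_height, new_width)  # 384 512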
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 16 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase__ = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example and record the characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"""repo_name""",
"""path""",
"""copies""",
"""size""",
"""content""",
"""license""",
"""hash""",
"""line_mean""",
"""line_max""",
"""alpha_frac""",
"""autogenerated""",
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 30 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample back to its source image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning)
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning)
        return self.image_processor
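# Hedged usage sketch for the processor above; the checkpoint name and input
# file are illustrative assumptions, and OCR (pytesseract) must be available.
from PIL import Image

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
print(encoding.keys())  # includes input_ids, bbox, attention_mask, image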
| 213 |
"""simple docstring"""
def circle_sort(collection: list) -> list:
    """Sort a list in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 213 | 1 |
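# Quick non-interactive check of circle_sort, assuming the function above:
assert circle_sort([6, 1, 5, 2, 4, 3]) == [1, 2, 3, 4, 5, 6]
assert circle_sort([]) == []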
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 21 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon)
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}")
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]])
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 21 | 1 |
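# Usage sketch for two of the helpers above, assuming the module context:
# shape_list mixes static and dynamic dims, flatten mirrors torch.flatten.
x = tf.zeros((2, 3, 4))
print(shape_list(x))                        # [2, 3, 4]
print(shape_list(flatten(x, start_dim=1)))  # [2, 12]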
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story with no highlights yields an empty list of summary lines."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields empty story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_highlights(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 135 |
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given length (Project Euler 145)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length)
        return result
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length)
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F"""{solution() = }""") | 135 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
"""simple docstring"""
def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , )
def _compute(self, predictions, references, min_len=1, max_len=4):
    '''simple docstring'''
    return {
        "google_bleu": gleu_score.corpus_gleu(
            list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
        )
    }
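# Hedged usage sketch (not part of the original metric file): _compute above is a
# thin wrapper around nltk, so the same number can be obtained directly. The toy
# tokens below are made up for illustration.
if __name__ == "__main__":
    hyp = ["the", "cat", "sat", "on", "the", "mat"]
    ref = ["the", "cat", "is", "on", "the", "mat"]
    print(gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp], min_len=1, max_len=4))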
| 357 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
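# For comparison (not part of the original file): the same deferred import can be
# written with a module-level __getattr__ (PEP 562). Once _LazyModule has replaced
# this module in sys.modules above, the hook is never consulted; it is only a
# sketch of the underlying idea.
def __getattr__(name):
    if name == "Wav2Vec2ProcessorWithLM":
        from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM

        return Wav2Vec2ProcessorWithLM
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")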
| 254 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """simple docstring"""
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
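# Tiny illustration (not in the original file): columns that are all padding are
# dropped from every row, while mixed columns are kept unchanged.
def _demo_trim_batch():
    ids = torch.tensor([[5, 7, 0, 0], [6, 0, 0, 0]])  # pad_token_id = 0
    return trim_batch(ids, pad_token_id=0)  # -> tensor([[5, 7], [6, 0]])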
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def __lowerCamelCase ( UpperCAmelCase_ : List[List] ):
"""simple docstring"""
return list(itertools.chain.from_iterable(UpperCAmelCase_ ) )
def __lowerCamelCase ( UpperCAmelCase_ : str ):
"""simple docstring"""
a :Any = get_git_info()
save_json(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , '''git_log.json''' ) )
def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict=4 , **UpperCAmelCase_ : str ):
"""simple docstring"""
with open(UpperCAmelCase_ , '''w''' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ , indent=UpperCAmelCase_ , **UpperCAmelCase_ )
def __lowerCamelCase ( UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
with open(UpperCAmelCase_ ) as f:
return json.load(UpperCAmelCase_ )
def __lowerCamelCase ( ):
"""simple docstring"""
a :Optional[int] = git.Repo(search_parent_directories=UpperCAmelCase_ )
a :Optional[Any] = {
'''repo_id''': str(UpperCAmelCase_ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def __lowerCamelCase ( UpperCAmelCase_ : Callable , UpperCAmelCase_ : Iterable ):
"""simple docstring"""
return list(map(UpperCAmelCase_ , UpperCAmelCase_ ) )
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
with open(UpperCAmelCase_ , '''wb''' ) as f:
return pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    """simple docstring"""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    """simple docstring"""
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    """simple docstring"""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
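# Worked example (not in the original file): normalization lowercases, strips
# punctuation and drops the articles a/an/the before tokens are compared.
def _demo_qa_metrics():
    assert exact_match_score("The Eiffel Tower!", "eiffel tower")  # both normalize to "eiffel tower"
    assert abs(f1_score("eiffel tower paris", "eiffel tower") - 0.8) < 1e-9  # P=2/3, R=1 -> F1=0.8
    return calculate_exact_match(["a cat"], ["the cat"])  # {'em': 1.0}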
def __lowerCamelCase ( UpperCAmelCase_ : Any ):
"""simple docstring"""
return model_prefix.startswith('''rag''' )
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
"""simple docstring"""
a :Union[str, Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
a :Optional[Any] = '''dropout_rate'''
for p in extra_params:
if getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
if not hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) and not hasattr(UpperCAmelCase_ , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(UpperCAmelCase_ ) )
delattr(UpperCAmelCase_ , UpperCAmelCase_ )
continue
a :Union[str, Any] = p if hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) else equivalent_param[p]
setattr(UpperCAmelCase_ , UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
delattr(UpperCAmelCase_ , UpperCAmelCase_ )
return hparams, config
| 94 |
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
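# Example run (not part of the original file): value-per-weight ratios are 60, 50
# and 40, so the greedy order takes all of items 0 and 1, then 2/3 of item 2.
max_value, fractions = fractional_knapsack([60, 100, 120], [1, 2, 3], 5)
print(max_value, fractions)  # 240.0 [1, 1, 0.6666666666666666]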
| 78 | 0 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
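# Quick sanity sketch (not in the original script): with sigma = sin(t*pi/2)^2 and
# alpha = sqrt(1 - sigma^2), alpha^2 + sigma^2 == 1, so atan2 maps the pair back to
# a timestep in [0, 1]; the schedule fixes the endpoints 0 and 1.
def _check_crash_schedule():
    t = torch.linspace(0, 1, 5)
    out = get_crash_schedule(t)
    assert out[0] == 0 and abs(float(out[-1]) - 1.0) < 1e-6
    return out  # monotonically increasing tensor in [0, 1]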
class Object(object):
    '''simple docstring'''
    pass


class DiffusionUncond(nn.Module):
    '''simple docstring'''

    def __init__(self, global_args):
        '''simple docstring'''
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, name, v):
    if len(name) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[name[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[name[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[name[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
__lowercase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__lowercase = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
__lowercase = download(lowerCamelCase_ )
__lowercase = MODELS_MAP[model_name]['''sample_rate''']
__lowercase = MODELS_MAP[model_name]['''sample_size''']
__lowercase = Object()
__lowercase = sample_size
__lowercase = sample_rate
__lowercase = 0
__lowercase = UNetaDModel(sample_size=lowerCamelCase_ , sample_rate=lowerCamelCase_ )
__lowercase = diffusers_model.state_dict()
__lowercase = DiffusionUncond(lowerCamelCase_ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase_ )['''state_dict'''] )
__lowercase = orig_model.diffusion_ema.eval()
__lowercase = orig_model.state_dict()
__lowercase = rename_orig_weights(lowerCamelCase_ )
__lowercase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
__lowercase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase_ ) == 0, f"Problem with {renamed_minus_diffusers}"
assert all(k.endswith('''kernel''' ) for k in list(lowerCamelCase_ ) ), f"Problem with {diffusers_minus_renamed}"
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
if key == "time_proj.weight":
__lowercase = value.squeeze()
__lowercase = value
diffusers_model.load_state_dict(lowerCamelCase_ )
__lowercase = 1_0_0
__lowercase = 3_3
__lowercase = IPNDMScheduler(num_train_timesteps=lowerCamelCase_ )
__lowercase = torch.manual_seed(lowerCamelCase_ )
__lowercase = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase_ ).to(lowerCamelCase_ )
__lowercase = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase_ )[:-1]
__lowercase = get_crash_schedule(lowerCamelCase_ )
__lowercase = DanceDiffusionPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
__lowercase = torch.manual_seed(3_3 )
__lowercase = pipe(num_inference_steps=lowerCamelCase_ , generator=lowerCamelCase_ ).audios
__lowercase = sampling.iplms_sample(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , {} )
__lowercase = generated.clamp(-1 , 1 )
__lowercase = (generated - audio).abs().sum()
__lowercase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , lowerCamelCase_ )
print('''Diff max''' , lowerCamelCase_ )
assert diff_max < 1E-3, f"Diff max: {diff_max} is too much :-/"
print(f"Conversion for {model_name} successful!" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
args = parser.parse_args()
main(args)
| 359 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy APX-Algorithm for minimum vertex cover."""
    queue = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
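# Follow-up check (not in the original file): a vertex cover must touch every edge,
# i.e. each (u, v) has at least one endpoint in the returned set.
cover = greedy_min_vertex_cover(graph)
assert all(u in cover or v in cover for u in graph for v in graph[u])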
| 217 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 25 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
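# Equivalent sliding-window cross-check (not in the original file), using math.prod
# over every run of 13 digits in the same constant N.
def solution_windowed(n: str = N, width: int = 13) -> int:
    from math import prod

    return max(prod(int(d) for d in n[i : i + width]) for i in range(len(n) - width + 1))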
if __name__ == "__main__":
print(F'''{solution() = }''')
| 327 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
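# Closed-form cross-check (not in the original file): sum(1..n) = n(n+1)/2 and
# sum of squares = n(n+1)(2n+1)/6 give the same difference in O(1).
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares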
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(self, vocab_size=4_23_84, hidden_size=10_24, num_hidden_layers=24, num_attention_heads=16, intermediate_size=40_96, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=10_24, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
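# Hedged usage sketch (not part of the original file): any keyword passed to the
# constructor overrides the corresponding default on the config object.
if __name__ == "__main__":
    config = BioGptConfig(num_hidden_layers=12)
    print(config.num_hidden_layers, config.hidden_size)  # 12 1024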
| 100 | 1 |
'''simple docstring'''
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003 | 297 |
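# Optional follow-up sketch (not in the original script): the train/test split above
# is never used, but it lets us score the polynomial fit on held-out rows.
print("R^2 on test:", pol_reg.score(poly_reg.fit_transform(X_test), y_test))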
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_DESCRIPTION = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_KWARGS_DESCRIPTION = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
def _info(self) -> MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def _compute(self, predictions, references, min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
    '''simple docstring'''
    return {
        "google_bleu": gleu_score.corpus_gleu(
            list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
        )
    }
| 234 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 353 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__A = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = "relu") ->Any:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Dict =nn.Convad(
UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=kernel_size // 2 , bias=UpperCAmelCase_)
lowerCamelCase__: Any =nn.BatchNormad(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Tensor) ->Tensor:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.convolution(UpperCAmelCase_)
lowerCamelCase__: List[str] =self.normalization(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =self.activation(UpperCAmelCase_)
return hidden_state
class ResNetEmbeddings(nn.Module):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : ResNetConfig) ->str:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Tuple =ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act)
lowerCamelCase__: Optional[int] =nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1)
lowerCamelCase__: Optional[Any] =config.num_channels
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Tensor) ->Tensor:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
lowerCamelCase__: Dict =self.embedder(UpperCAmelCase_)
lowerCamelCase__: str =self.pooler(UpperCAmelCase_)
return embedding
class ResNetShortCut(nn.Module):
'''simple docstring'''
def __init__(self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 2) ->Any:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Optional[Any] =nn.Convad(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 , stride=UpperCAmelCase_ , bias=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =nn.BatchNormad(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Tensor) ->Tensor:
'''simple docstring'''
lowerCamelCase__: Optional[int] =self.convolution(UpperCAmelCase_)
lowerCamelCase__: Any =self.normalization(UpperCAmelCase_)
return hidden_state
class ResNetBasicLayer(nn.Module):
'''simple docstring'''
def __init__(self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = "relu") ->Tuple:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Tuple =in_channels != out_channels or stride != 1
lowerCamelCase__: str =(
ResNetShortCut(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_) if should_apply_shortcut else nn.Identity()
)
lowerCamelCase__: Tuple =nn.Sequential(
ResNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_) , ResNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , activation=UpperCAmelCase_) , )
lowerCamelCase__: Optional[Any] =ACTaFN[activation]
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Any) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Any =hidden_state
lowerCamelCase__: List[str] =self.layer(UpperCAmelCase_)
lowerCamelCase__: str =self.shortcut(UpperCAmelCase_)
hidden_state += residual
lowerCamelCase__: Dict =self.activation(UpperCAmelCase_)
return hidden_state
class ResNetBottleNeckLayer(nn.Module):
'''simple docstring'''
def __init__(self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = "relu" , UpperCAmelCase_ : int = 4) ->Tuple:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Union[str, Any] =in_channels != out_channels or stride != 1
lowerCamelCase__: List[str] =out_channels // reduction
lowerCamelCase__: Optional[Any] =(
ResNetShortCut(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_) if should_apply_shortcut else nn.Identity()
)
lowerCamelCase__: Dict =nn.Sequential(
ResNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1) , ResNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_) , ResNetConvLayer(UpperCAmelCase_ , UpperCAmelCase_ , kernel_size=1 , activation=UpperCAmelCase_) , )
lowerCamelCase__: Tuple =ACTaFN[activation]
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : List[str]) ->int:
'''simple docstring'''
lowerCamelCase__: str =hidden_state
lowerCamelCase__: Optional[Any] =self.layer(UpperCAmelCase_)
lowerCamelCase__: List[Any] =self.shortcut(UpperCAmelCase_)
hidden_state += residual
lowerCamelCase__: Tuple =self.activation(UpperCAmelCase_)
return hidden_state
class ResNetStage(nn.Module):
'''simple docstring'''
def __init__(self : str , UpperCAmelCase_ : ResNetConfig , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 2 , ) ->Dict:
'''simple docstring'''
super().__init__()
lowerCamelCase__: List[Any] =ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
lowerCamelCase__: List[str] =nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(UpperCAmelCase_ , UpperCAmelCase_ , stride=UpperCAmelCase_ , activation=config.hidden_act) , *[layer(UpperCAmelCase_ , UpperCAmelCase_ , activation=config.hidden_act) for _ in range(depth - 1)] , )
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Tensor) ->Tensor:
'''simple docstring'''
lowerCamelCase__: List[Any] =input
for layer in self.layers:
lowerCamelCase__: Any =layer(UpperCAmelCase_)
return hidden_state
class ResNetEncoder(nn.Module):
'''simple docstring'''
def __init__(self : int , UpperCAmelCase_ : ResNetConfig) ->Any:
'''simple docstring'''
super().__init__()
lowerCamelCase__: Tuple =nn.ModuleList([])
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
UpperCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ))
in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
    self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))
def SCREAMING_SNAKE_CASE_ (self : Tuple , UpperCAmelCase_ : Tensor , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True) ->BaseModelOutputWithNoAttention:
'''simple docstring'''
lowerCamelCase__: str =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCamelCase__: Union[str, Any] =hidden_states + (hidden_state,)
lowerCamelCase__: str =stage_module(UpperCAmelCase_)
if output_hidden_states:
lowerCamelCase__: Optional[int] =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , )
class ResNetPreTrainedModel(PreTrainedModel):
'''simple docstring'''
config_class = ResNetConfig
base_model_prefix = "resnet"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : int) ->Any:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , nn.Convad):
nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu")
elif isinstance(UpperCAmelCase_ , (nn.BatchNormad, nn.GroupNorm)):
nn.init.constant_(module.weight , 1)
nn.init.constant_(module.bias , 0)
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]=False) ->Any:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: List[str] =value
RESNET_START_DOCSTRING = R"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
RESNET_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top." , RESNET_START_DOCSTRING , )
class ResNetModel ( ResNetPreTrainedModel ):
    '''simple docstring'''
    def __init__(self : Union[str, Any] , config : ResNetConfig) ->None:
        '''simple docstring'''
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward(self : Optional[int] , pixel_values : Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None) ->BaseModelOutputWithPoolingAndNoAttention:
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    "\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , RESNET_START_DOCSTRING , )
class ResNetForImageClassification ( ResNetPreTrainedModel ):
    '''simple docstring'''
    def __init__(self : Tuple , config : ResNetConfig) ->None:
        '''simple docstring'''
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward(self : Union[str, Any] , pixel_values : Optional[torch.FloatTensor] = None , labels : Optional[torch.LongTensor] = None , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , ) ->ImageClassifierOutputWithNoAttention:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze())
                else:
                    loss = loss_fct(logits , labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states)
@add_start_docstrings(
    "\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    " , RESNET_START_DOCSTRING , )
class ResNetBackbone ( ResNetPreTrainedModel , BackboneMixin ):
    '''simple docstring'''
    def __init__(self : str , config : ResNetConfig) ->None:
        '''simple docstring'''
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput , config_class=_CONFIG_FOR_DOC)
    def forward(self : Union[str, Any] , pixel_values : Tensor , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None) ->BackboneOutput:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output , output_hidden_states=True , return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=None , )
| 273 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester :
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , num_labels=3 , mask_ratio=0.6 , scope=None , )->Dict:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.num_patches = num_patches
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def prepare_config_and_inputs( self )->Optional[Any]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self )->List[str]:
        '''simple docstring'''
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model( self , config , pixel_values , labels )->List[str]:
        '''simple docstring'''
        model = ViTMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels )->List[str]:
        '''simple docstring'''
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self )->Dict:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self )->int:
        '''simple docstring'''
        self.model_tester = ViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self )->Union[str, Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
    def test_inputs_embeds( self )->List[str]:
        '''simple docstring'''
        pass
    def test_model_common_attributes( self )->Tuple:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self )->Union[str, Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self )->Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining( self )->Tuple:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def check_pt_tf_models( self , tf_model , pt_model , pt_inputs_dict )->Optional[Any]:
        '''simple docstring'''
        np.random.seed(2 )
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        pt_noise = torch.from_numpy(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict['''noise'''] = pt_noise
        super().check_pt_tf_models(tf_model , pt_model , pt_inputs_dict )
    def test_save_load( self )->Optional[int]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2 )
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            out_a = outputs[0].cpu().numpy()
            out_a[np.isnan(out_a )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model = model_class.from_pretrained(tmpdirname )
                model.to(torch_device )
                # make random mask reproducible
                torch.manual_seed(2 )
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
                # Make sure we don't have nans
                out_b = after_outputs[0].cpu().numpy()
                out_b[np.isnan(out_b )] = 0
                max_diff = np.amax(np.abs(out_a - out_b ) )
                self.assertLessEqual(max_diff , 1e-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def _snake_case ( self )->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def _snake_case ( self )->Tuple:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case ( self )->str:
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained( self )->Any:
        '''simple docstring'''
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest ( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self )->Optional[int]:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining( self )->Any:
        '''simple docstring'''
        np.random.seed(2 )
        model = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs , noise=torch.from_numpy(noise ).to(device=torch_device ) )
        # verify the logits
        expected_shape = torch.Size((1, 196, 768) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(torch_device ) , atol=1e-4 ) )
| 186 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
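# --- Editor's note (illustrative): with `_LazyModule`, submodules are only
# imported on first attribute access, e.g.:
#   import transformers.models.roberta as roberta
#   roberta.RobertaConfig  # triggers the real import of `configuration_roberta`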
| 186 | 1 |
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self ) -> str:
        '''simple docstring'''
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ) -> Optional[int]:
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ) -> Tuple:
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components( self ) -> Union[str, Any]:
        '''simple docstring'''
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ) -> Optional[Any]:
        '''simple docstring'''
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self ) -> List[str]:
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ) -> Dict:
        '''simple docstring'''
        self._test_save_load_local()
    def test_inference_batch_single_identical( self ) -> Any:
        '''simple docstring'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 288 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
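# --- Editor's sketch (illustrative): the dict above collects non-default
# `PretrainedConfig` kwargs, so every entry should round-trip through a config:
#   cfg = PretrainedConfig(**config_common_kwargs)
#   assert cfg.num_beams == 3 and cfg.problem_type == "regression"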
@is_staging_test
class ConfigPushToHubTester ( unittest.TestCase ):
    @classmethod
    def setUpClass( cls ) -> Any:
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass( cls ) -> str:
        '''simple docstring'''
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
    def test_push_to_hub( self ) -> Dict:
        '''simple docstring'''
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('test-config' , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(F'{USER}/test-config' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-config' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id='test-config' , push_to_hub=True , use_auth_token=self._token )
            new_config = BertConfig.from_pretrained(F'{USER}/test-config' )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_in_organization( self ) -> Optional[int]:
        '''simple docstring'''
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='valid_org/test-config-org' , push_to_hub=True , use_auth_token=self._token )
            new_config = BertConfig.from_pretrained('valid_org/test-config-org' )
            for k, v in config.to_dict().items():
                if k != "transformers_version":
                    self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_dynamic_config( self ) -> Tuple:
        '''simple docstring'''
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
        config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
        new_config = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config' , trust_remote_code=True )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
        self.assertEqual(new_config.attribute , 42 )
class ConfigTestUtils ( unittest.TestCase ):
    def test_config_from_string( self ) -> Tuple:
        '''simple docstring'''
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1 # int
        resid_pdrop = c.resid_pdrop + 1.0 # float
        scale_attn_weights = not c.scale_attn_weights # bool
        summary_type = c.summary_type + 'foo' # str
        c.update_from_string(
            F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' )
        self.assertEqual(n_embd , c.n_embd , 'mismatch for key: n_embd' )
        self.assertEqual(resid_pdrop , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
        self.assertEqual(scale_attn_weights , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
        self.assertEqual(summary_type , c.summary_type , 'mismatch for key: summary_type' )
    def test_config_common_kwargs_is_complete( self ) -> Union[str, Any]:
        '''simple docstring'''
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F' {", ".join(keys_with_defaults )}.' )
    def test_from_pretrained_subfolder( self ) -> Optional[int]:
        '''simple docstring'''
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
        self.assertIsNotNone(config )
    def test_cached_files_are_used_when_internet_is_down( self ) -> List[Any]:
        '''simple docstring'''
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check makes sure we did call the fake head request
            mock_head.assert_called()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
# This test is for deprecated behavior and can be removed in v5
lowercase__: Tuple = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
    def test_local_versioning( self ) -> Union[str, Any]:
        '''simple docstring'''
        configuration = AutoConfig.from_pretrained('bert-base-cased' )
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , 'config.4.0.0.json' ) , 'w' ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir , 'config.4.0.0.json' ) , os.path.join(tmp_dir , 'config.42.0.0.json' ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size , 768 )
    def test_repo_versioning_before( self ) -> Union[str, Any]:
        '''simple docstring'''
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size , 768 )
| 288 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__magic_name__ = logging.get_logger(__name__)
class YolosFeatureExtractor ( YolosImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs):
        warnings.warn(
            """The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use YolosImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs)
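# --- Editor's sketch (standalone, hypothetical names): the same deprecation
# pattern as above, reduced to its essentials. The old name stays importable
# but emits a FutureWarning and defers all behavior to the replacement class.
class _NewThing:
    pass
class _OldThing(_NewThing):
    def __init__( self , *args , **kwargs ):
        warnings.warn('_OldThing is deprecated; use _NewThing instead.' , FutureWarning )
        super().__init__(*args , **kwargs )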
| 100 |
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree :
    def __init__( self , size ) -> None:
        """simple docstring"""
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0 , 4 * size )]
        # create array to store lazy update
        self.lazy = [0 for i in range(0 , 4 * size )]
        self.flag = [0 for i in range(0 , 4 * size )] # flag for lazy update
    def left( self , idx ) -> int:
        """simple docstring"""
        return idx * 2
    def right( self , idx ) -> int:
        """simple docstring"""
        return idx * 2 + 1
    def build( self , idx , left_element , right_element , a ) -> None:
        """simple docstring"""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx ) , left_element , mid , a )
            self.build(self.right(idx ) , mid + 1 , right_element , a )
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
    def update( self , idx , left_element , right_element , a , b , val ) -> bool:
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx )] = val
                self.lazy[self.right(idx )] = val
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx ) , left_element , mid , a , b , val )
        self.update(self.right(idx ) , mid + 1 , right_element , a , b , val )
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx )] , self.segment_tree[self.right(idx )] )
        return True
    def query( self , idx , left_element , right_element , a , b ) -> int | float:
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx )] = self.lazy[idx]
                self.lazy[self.right(idx )] = self.lazy[idx]
                self.flag[self.left(idx )] = True
                self.flag[self.right(idx )] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx ) , left_element , mid , a , b )
        q2 = self.query(self.right(idx ) , mid + 1 , right_element , a , b )
        return max(q1 , q2 )
    def __str__( self ) -> str:
        """simple docstring"""
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
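# --- Editor's addition (hypothetical helper, illustrative): a naive O(n)
# range-max that mirrors `query`'s 1-indexed, inclusive interface, useful for
# cross-checking the lazy segment tree on small arrays.
def _naive_max( arr , left , right ):
    return max(arr[left - 1 : right] )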
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
    size = 1_5
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt)
| 321 | 0 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger('transformers.models.speecht5')
def load_weights( checkpoint , hf_model , config )-> None:
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[F"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[F"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[F"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
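# --- Editor's illustration (standalone): weight norm factors each conv weight
# into a magnitude `weight_g` and a direction `weight_v` with
# weight = g * v / ||v||, which is why the checkpoint above stores separate
# `weight_g`/`weight_v` tensors per convolution.
def _weight_norm_demo( ):
    from torch import nn
    conv = nn.utils.weight_norm(nn.Conv1d(4 , 4 , 3 ) )
    assert hasattr(conv , 'weight_g' ) and hasattr(conv , 'weight_v' )
    nn.utils.remove_weight_norm(conv )  # folds g and v back into a single `weight`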
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , )-> None:
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
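    # --- Editor's usage sketch (hypothetical paths; flags as defined above):
    #   python convert_hifigan_checkpoint.py \
    #       --checkpoint_path generator.ckpt \
    #       --stats_path stats.npy \
    #       --pytorch_dump_folder_path ./speecht5_hifigan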
| 183 |
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level( )-> int:
    env_level_str = os.getenv("""TRANSFORMERS_VERBOSITY""" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                F"has to be one of: { ', '.join(log_levels.keys() ) }" )
    return _default_log_level
def _get_library_name( )-> str:
    return __name__.split(""".""" )[0]
def _get_library_root_logger( )-> logging.Logger:
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger( )-> None:
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler() # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger( )-> None:
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict( )-> dict:
    return log_levels
def get_logger( name = None )-> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def get_verbosity( )-> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity( verbosity )-> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info( ):
    return set_verbosity(INFO )
def set_verbosity_warning( ):
    return set_verbosity(WARNING )
def set_verbosity_debug( ):
    return set_verbosity(DEBUG )
def set_verbosity_error( ):
    return set_verbosity(ERROR )
def disable_default_handler( )-> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler( )-> None:
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )
def add_handler( handler )-> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )
def remove_handler( handler )-> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def disable_propagation( )-> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation( )-> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format( )-> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
        handler.setFormatter(formatter )
def reset_format( )-> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice( self , *args , **kwargs )-> None:
    no_advisory_warnings = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once( self , *args , **kwargs )-> None:
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm :
    def __init__( self , *args , **kwargs ) -> None: # pylint: disable=unused-argument
        """simple docstring"""
        self._iterator = args[0] if args else None
    def __iter__( self ):
        """simple docstring"""
        return iter(self._iterator )
    def __getattr__( self , name ):
        """simple docstring"""
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        """simple docstring"""
        return self
    def __exit__( self , type_ , value , traceback ):
        """simple docstring"""
        return
class _tqdm_cls :
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        """simple docstring"""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        """simple docstring"""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled( )-> bool:
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar( )-> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bar( )-> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
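# --- Editor's usage sketch (illustrative): exercising the public API above.
if __name__ == "__main__":
    logger = get_logger("transformers.example" )
    set_verbosity_info()
    logger.info("visible at INFO level" )
    set_verbosity_error()
    logger.info("suppressed once verbosity is ERROR" )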
| 183 | 1 |
import sys
from collections import defaultdict
class Heap :
    def __init__( self ):
        '''simple docstring'''
        self.node_position = []
    def get_position( self , vertex ):
        '''simple docstring'''
        return self.node_position[vertex]
    def set_position( self , vertex , pos ):
        '''simple docstring'''
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ):
        '''simple docstring'''
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ):
        '''simple docstring'''
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ):
        '''simple docstring'''
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ):
        '''simple docstring'''
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ) -> list:
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list ) # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = [] # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
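# --- Editor's addition (illustrative): a tiny hard-coded graph, bypassing the
# interactive prompt below. Edges are (u, v, weight) on 4 vertices; the MST
# here is the chain 0-1-2-3 with total weight 6.
def _demo_prim( ):
    graph = defaultdict(list )
    for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 6), (2, 3, 3)]:
        graph[u].append([v, w] )
        graph[v].append([u, w] )
    return prisms_algorithm(graph )  # [(0, 1), (1, 2), (2, 3)]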
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('''Enter number of edges: ''').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 306 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __UpperCAmelCase (unittest.TestCase ):
def __init__( self: Optional[Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any]=13 , UpperCAmelCase_: List[str]=7 , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: List[str]=True , UpperCAmelCase_: Optional[Any]=True , UpperCAmelCase_: str=99 , UpperCAmelCase_: List[Any]=32 , UpperCAmelCase_: Dict=5 , UpperCAmelCase_: Tuple=4 , UpperCAmelCase_: Optional[Any]=37 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: Optional[Any]=0.1 , UpperCAmelCase_: List[Any]=0.1 , UpperCAmelCase_: List[Any]=512 , UpperCAmelCase_: Any=16 , UpperCAmelCase_: Dict=2 , UpperCAmelCase_: Union[str, Any]=0.02 , UpperCAmelCase_: Union[str, Any]=4 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_choices
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCAmelCase_ , )
return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase ):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxDistilBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase ):
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
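        # Note: only a 3x3 slice of the hidden states is compared; atol=1e-4
        # leaves headroom for small numerical differences across jaxlib builds.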
| 306 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
    _import_structure['modeling_maskformer_swin'] = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
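# Minimal usage sketch (assumes the torch and vision extras are installed, so
# the lazy module resolves these names on first attribute access):
#   from transformers.models.maskformer import MaskFormerConfig, MaskFormerModel
#   model = MaskFormerModel(MaskFormerConfig())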
| 136 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results(test_results):
    """simple docstring"""
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
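# Example (hypothetical pytest summary): handle_test_results("1 failed, 2 passed in 0:01:02")
# reads the count directly before each "failed"/"passed" token and returns
# (1, 2, "0:01:02").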
def extract_first_line_failure(failures_short_lines):
    """simple docstring"""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
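# Example (hypothetical "failures short" excerpt):
#   ____ [doctest] transformers.models.bart.modeling_bart.BartModel.forward ____
#   ValueError: expected sequence of length 4
# -> {"transformers.models.bart.modeling_bart.BartModel.forward":
#     "ValueError: expected sequence of length 4"}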
class Message:
'''simple docstring'''
    def __init__(self, title: str, doc_test_results: Dict) -> None:
        '''simple docstring'''
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self) -> str:
        '''simple docstring'''
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
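    # e.g. self._time_spent == "0:01:32" -> "0h1m32s"; a bare "2.15" is read
    # as 2.15 seconds and renders as "0h0m2s".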
@property
    def header(self) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def failures(self) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def category_failures(self) -> Dict:
        '''simple docstring'''
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
    def payload(self) -> str:
        '''simple docstring'''
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
@staticmethod
    def error_out() -> None:
        '''simple docstring'''
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload, )
    def post(self) -> None:
        '''simple docstring'''
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        '''simple docstring'''
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self) -> None:
        '''simple docstring'''
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts["ts"], )
                time.sleep(1)
def get_job_links():
    """simple docstring"""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
    return {}
def retrieve_artifact(name: str):
    """simple docstring"""
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    """simple docstring"""
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []
        def __str__(self):
            return self.name
        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
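# Sketch of the expected CI layout: each downloaded artifact is a directory in
# the working dir (e.g. "doc_tests_gpu_test_reports/" containing "stats",
# "failures_short", "summary_short"), so after retrieve_available_artifacts()
# available_artifacts["doc_tests_gpu_test_reports"].paths[0]["name"] is the
# directory name that retrieve_artifact() then reads file by file.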
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get('run_doctests')
    artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    artifact = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['stats'])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ', '
        all_failures = extract_first_line_failure(artifact['failures_short'])
        for line in artifact["summary_short"].split('\n'):
            if re.search('FAILED', line):
                line = line.replace('FAILED ', '')
                line = line.split()[0].replace('\n', '')
                if "::" in line:
                    file_path, test = line.split('::')
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else 'N/A'
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 136 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_detr_config(model_name):
    '''simple docstring'''
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-50''')
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('''microsoft/resnet-101''')
    else:
        raise ValueError('''Model name should include either resnet50 or resnet101''')
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)
    # set label attributes
    is_panoptic = '''panoptic''' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = '''huggingface/label-files'''
        filename = '''coco-detection-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    return config, is_panoptic
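# e.g. get_detr_config("detr-resnet-50") yields a DetrConfig carrying the 91
# COCO detection labels with is_panoptic=False; a name containing "panoptic"
# flips num_labels to 250 and is_panoptic to True.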
def create_rename_keys(config):
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def rename_key(state_dict, old, new):
    '''simple docstring'''
    val = state_dict.pop(old)
    state_dict[new] = val
def read_in_q_k_v(state_dict, is_panoptic=False):
    '''simple docstring'''
    prefix = ''''''
    if is_panoptic:
        prefix = '''detr.'''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''')
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:256]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[256:512]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-256:]
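# The popped in_proj matrices have 768 rows: stacked q, k and v projections of
# 256 rows each (DETR's hidden size), which is why they are sliced in thirds.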
def prepare_img():
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    '''simple docstring'''
    config, is_panoptic = get_detr_config(model_name)
    # load original model from torch hub
    model_name_to_original_name = {
        '''detr-resnet-50''': '''detr_resnet50''',
        '''detr-resnet-101''': '''detr_resnet101''',
    }
    logger.info(f'''Converting model {model_name}...''')
    detr = torch.hub.load('''facebookresearch/detr''', model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = '''detr.''' + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''detr''')
                and not key.startswith('''class_labels_classifier''')
                and not key.startswith('''bbox_predictor''')
            ):
                val = state_dict.pop(key)
                state_dict['''detr.model''' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict['''detr.''' + key] = val
            elif key.startswith('''bbox_attention''') or key.startswith('''mask_head'''):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion on an image
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    processor = DetrImageProcessor(format=format)
    encoding = processor(images=prepare_img(), return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']
    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs['''pred_logits'''], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['''pred_boxes'''], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['''pred_masks'''], atol=1e-4)
    print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('''Uploading PyTorch model and image processor to the hub...''')
        model.push_to_hub(f'''nielsr/{model_name}''')
        processor.push_to_hub(f'''nielsr/{model_name}''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 97 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ) -> None:
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None) -> None:
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
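# Minimal usage sketch:
#   get_formatter("np")   # the alias resolves to "numpy" and returns a NumpyFormatter
#   get_formatter("pt")   # returns a TorchFormatter, or raises _torch_error if torch is absent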
| 228 | 0 |
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
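# Hypothetical call: check_correctness("assert 1 + 1 == 2", timeout=3.0,
# task_id="t0", completion_id=0) runs the program in a sandboxed subprocess
# and returns {"task_id": "t0", "passed": True, "result": "passed",
# "completion_id": 0}.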
def unsafe_execute(check_program, result, timeout):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(f"""failed: {e}""" )
# Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException('''Timed out!''')
    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
yield dirname
class TimeoutException(Exception):
    '''simple docstring'''
    pass
class WriteOnlyStringIO(io.StringIO):
    '''simple docstring'''
    def read(self, *args, **kwargs):
        """simple docstring"""
        raise OSError
    def readline(self, *args, **kwargs):
        """simple docstring"""
        raise OSError
    def readlines(self, *args, **kwargs):
        """simple docstring"""
        raise OSError
    def readable(self, *args, **kwargs):
        """simple docstring"""
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    '''simple docstring'''
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
try:
yield
except BaseException as exc:
raise exc
finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ["OMP_NUM_THREADS"] = '''1'''
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
    import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 302 |
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"""0o{int(octal)}"""
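# e.g. decimal_to_octal(65): remainders 1, 0, 1 (least-significant first)
# accumulate as 1*1 + 0*10 + 1*100 = 101, so the result is "0o101".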
def main() -> None:
print('''\n2 in octal is:''' )
print(decimal_to_octal(2 ) ) # = 2
print('''\n8 in octal is:''' )
print(decimal_to_octal(8 ) ) # = 10
print('''\n65 in octal is:''' )
print(decimal_to_octal(65 ) ) # = 101
print('''\n216 in octal is:''' )
print(decimal_to_octal(2_16 ) ) # = 330
print('''\n512 in octal is:''' )
print(decimal_to_octal(5_12 ) ) # = 1000
print('''\n''' )
if __name__ == "__main__":
main()
| 302 | 1 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>", ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}" ) , " " ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=F'$A {self.special_tokens["eos"]["token"]}' , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer , parameters )
    def train(self, files: Union[str, List[str]], vocab_size: int = 80_00, show_progress: bool = True, ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )
        self.add_unk_id()
    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 80_00, show_progress: bool = True, ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer )
        self.add_unk_id()
    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
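    # Usage sketch (file name and vocab size are illustrative):
    #   tokenizer = SentencePieceUnigramTokenizer()
    #   tokenizer.train(["corpus.txt"], vocab_size=8_000)
    #   ids = tokenizer.encode("Hello world").ids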
| 12 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(1_28, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
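    # Note: `pred` is in the scaler's [0, 1] range. To report forecasts in the
    # original units, keep the fitted MinMaxScaler (instead of calling
    # fit_transform inline above) and apply scaler.inverse_transform(pred) --
    # a sketch, not done here.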
| 66 | 0 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n , prime_generator() ) )
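# Note: takewhile stops at the first prime >= n, so e.g. solution(10)
# sums 2 + 3 + 5 + 7 = 17.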
if __name__ == "__main__":
print(f'''{solution() = }''')
| 166 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("""module.encoder""" ):
            key = key.replace("""module.encoder""" , """glpn.encoder""" )
        if key.startswith("""module.decoder""" ):
            key = key.replace("""module.decoder""" , """decoder.stages""" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
            key = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(idx)-1}""" )
        if "norm" in key:
            key = key.replace("""norm""" , """layer_norm""" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
            key = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(idx)-1}""" )
        if "layer_norm1" in key:
            key = key.replace("""layer_norm1""" , """layer_norm_1""" )
        if "layer_norm2" in key:
            key = key.replace("""layer_norm2""" , """layer_norm_2""" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("""block""" ) + len("""block""" )]
            key = key.replace(F"""block{idx}""" , F"""block.{int(idx)-1}""" )
        if "attn.q" in key:
            key = key.replace("""attn.q""" , """attention.self.query""" )
        if "attn.proj" in key:
            key = key.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in key:
            key = key.replace("""attn""" , """attention.self""" )
        if "fc1" in key:
            key = key.replace("""fc1""" , """dense1""" )
        if "fc2" in key:
            key = key.replace("""fc2""" , """dense2""" )
        if "linear_pred" in key:
            key = key.replace("""linear_pred""" , """classifier""" )
        if "linear_fuse" in key:
            key = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
            key = key.replace("""linear_fuse.bn""" , """batch_norm""" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("""linear_c""" ) + len("""linear_c""" )]
            key = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(idx)-1}""" )
        if "bot_conv" in key:
            key = key.replace("""bot_conv""" , """0.convolution""" )
        if "skip_conv1" in key:
            key = key.replace("""skip_conv1""" , """1.convolution""" )
        if "skip_conv2" in key:
            key = key.replace("""skip_conv2""" , """2.convolution""" )
        if "fusion1" in key:
            key = key.replace("""fusion1""" , """1.fusion""" )
        if "fusion2" in key:
            key = key.replace("""fusion2""" , """2.fusion""" )
        if "fusion3" in key:
            key = key.replace("""fusion3""" , """3.fusion""" )
        if "fusion" in key and "conv" in key:
            key = key.replace("""conv""" , """convolutional_layer""" )
        if key.startswith("""module.last_layer_depth""" ):
            key = key.replace("""module.last_layer_depth""" , """head.head""" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
            kv_bias = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
            # next, add keys and values (in that order) to the state dict
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F"""glpn.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[config.hidden_sizes[i] :]
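# Unlike DETR's fused qkv above, this kv matrix stacks only keys and values,
# so it is split in halves at config.hidden_sizes[i]; queries already live in
# a separate "query" weight handled by rename_keys.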
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""" ).pixel_values
    logger.info("""Converting model...""" )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict , config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
        else:
            raise ValueError(F"""Unknown model name: {model_name}""" )
        expected_shape = torch.Size([1, 4_80, 6_40] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] , expected_slice , atol=1E-4 )
        print("""Looks ok!""" )
    # finally, push to hub if required
    if push_to_hub:
        logger.info("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 166 | 1 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def test_set_level(self):
        """simple docstring"""
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(level_origin )
    def test_integration(self):
        """simple docstring"""
        level_origin = logging.get_verbosity()
        logger = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        msg = '''Testing 1, 2, 3'''
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger ) as cl:
                logger.warning(msg )
            self.assertEqual(cl.out , msg + '''\n''' )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , '''''' )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , msg + '''\n''' )
        # restore to the original level
        logging.set_verbosity(level_origin )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
    def test_env_override(self):
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        env_level_str = os.getenv('''TRANSFORMERS_VERBOSITY''' , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
        # restore to the original level
        os.environ['''TRANSFORMERS_VERBOSITY'''] = ''''''
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
    def test_env_invalid_override(self):
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger ) as cl:
            # this action activates the env var
            logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
    def test_advisory_warnings(self):
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        msg = '''Testing 1, 2, 3'''
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , '''''' )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , msg + '''\n''' )
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
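# Example: the two helpers above toggle tqdm progress bars globally; noisy code
# can be wrapped like this (sketch):
#   disable_progress_bar()
#   ...  # e.g. a download or dataset load without progress output
#   enable_progress_bar()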
| 24 |
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Return the result of 2D max pooling over a square matrix."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Return the result of 2D average pooling over a square matrix."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
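# Quick sanity check: a 2x2 window with stride 2 picks the maximum of each
# quadrant of this hand-chosen 4x4 input.
_demo = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
assert np.array_equal(maxpooling(_demo, size=2, stride=2), np.array([[6, 8], [14, 16]]))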
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open('path_to_image')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 117 | 0 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
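# Minimal standalone usage of the scheduler under test (sketch; assumes
# `diffusers` is installed and mirrors the config dict returned above):
#   scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(num_inference_steps=50)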
| 358 |
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # left-pad so the length is a multiple of 3 (each octal digit is 3 bits)
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
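# Worked example: "1111" pads to "001111" -> groups ["001", "111"] -> "17"
# (0b1111 == 0o17 == 15 in decimal).
assert bin_to_octal("1111") == "17"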
if __name__ == "__main__":
from doctest import testmod
testmod()
| 191 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into several sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences (<=11 tokens) are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
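# Typical use with a PyTorch DataLoader (sketch; `params` must expose the
# attributes read above: mlm, max_model_input_size, special_tok_ids, is_master):
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)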
| 95 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    '''Find the text between `start_prompt` and `end_prompt` in `filename`.'''
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    '''Return the list of models supporting a given task, formatted as doc links.'''
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    '''Check (and optionally fix) the model list in a task guide.'''
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""",
        end_prompt="""<!--End of the generated tip-->""",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
                """ to fix this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 84 | 0 |
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    '''Return the sum of the fifth powers of the digits of `number`.'''
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    '''Sum all numbers that equal the sum of the fifth powers of their digits.'''
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number))
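# Sanity check: 4150 = 4**5 + 1**5 + 5**5 + 0**5, so it is one of the numbers counted.
assert digits_fifth_powers_sum(4150) == 4150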
if __name__ == "__main__":
    print(solution())
| 222 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
    'good first issue',
    'good second issue',
    'good difficult issue',
    'feature request',
    'new model',
    'wip',
]


def main():
    g = Github(os.environ['''GITHUB_TOKEN'''])
    repo = g.get_repo('''huggingface/transformers''')
    open_issues = repo.get_issues(state='''open''')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
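# To run this script locally (sketch; assumes a GitHub token with repo scope,
# and the script path is hypothetical):
#   GITHUB_TOKEN=<token> python scripts/stale.py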
if __name__ == "__main__":
    main()
| 222 | 1 |
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
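# Float rounding can make the check above fail for some genuine cubes; a minimal
# integer-exact variant is sketched below (assumes n is a non-negative int).
def perfect_cube_exact(n: int) -> bool:
    cube_root = round(n ** (1 / 3))
    return cube_root**3 == n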
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 26 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F'''SPARK_PARTITION_ID() = {part_id}''').collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == F'''0_{i}'''
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("""numpy.random.Generator""") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 180 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default='automatic-speech-recognition', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'audio': Audio()})
    label_schema: ClassVar[Features] = Features({'transcription': Value('string')})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(F'''Column {self.audio_column} is not present in features.''')
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(F'''Column {self.audio_column} is not an Audio type.''')
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
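# Sketch of typical use (hypothetical dataset and column names; `align_with_features`
# swaps in the dataset's own Audio feature, e.g. one with a custom sampling rate):
#   template = AutomaticSpeechRecognition(audio_column="audio", transcription_column="sentence")
#   template = template.align_with_features(dataset.features)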
| 213 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = 'mgp-str'

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 213 | 1 |
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
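# Worked example (hypothetical menu; greedy() packs items in descending order of
# the chosen key function while the total weight stays within max_cost):
#   food = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 60, 10])
#   foods, total_value = greedy(food, 60, Things.get_value)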
| 349 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["""input_ids"""])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="""i4""")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="""i4""")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["""input_ids"""])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="""i4""")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ], axis=-1, )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["""input_ids"""], inputs_dict["""attention_mask"""])

                prepared_inputs_dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""google/pegasus-large""", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""")
        tokenizer = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""")

        src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
        tgt_text = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
        inputs = tokenizer(src_text, return_tensors="""np""", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 349 | 1 |
"""simple docstring"""
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
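# Known test vector: Adler-32 of "Wikipedia" is 0x11E60398.
assert adler32("Wikipedia") == 0x11E60398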
| 367 |
"""simple docstring"""
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """Return the minimal path sum from top-left to bottom-right, moving only right or down."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
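# Example: for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest right/down path is
# 1 -> 3 -> 1 -> 1 -> 1, so the function returns 7 (note: the input is modified in place).
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7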
if __name__ == "__main__":
import doctest
doctest.testmod()
| 150 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )

            down_blocks.append(down_block )
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels ) - 1 )]

            is_final_block = i == len(block_out_channels ) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )

            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        down_block_additional_residuals=None,
        mid_block_additional_residual=None,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxUNet2DConditionOutput, Tuple]:
        # 1. time
        if not isinstance(timesteps, jnp.ndarray ):
            timesteps = jnp.array([timesteps], dtype=jnp.int32 )
        elif isinstance(timesteps, jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps, 0 )

        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1) )
        sample = self.conv_in(sample )

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D ):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train )
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train )
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train )

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D ):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train, )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train )

        # 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample, (0, 3, 1, 2) )

        if not return_dict:
            return (sample,)

        return FlaxUNet2DConditionOutput(sample=sample )
| 270 |
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
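A short usage sketch of the nested-config pattern, assuming an installed transformers release that ships Mask2Former (the guard keeps it from running on import):

# Sketch (not from the file above): build the composite config and round-trip it.
if __name__ == "__main__":
    from transformers import Mask2FormerConfig, SwinConfig

    backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    config = Mask2FormerConfig(backbone_config=backbone)
    as_dict = config.to_dict()  # the nested backbone is serialized to a plain dict
    restored = Mask2FormerConfig(backbone_config=as_dict["backbone_config"])
    print(restored.backbone_config.model_type)  # "swin"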
| 270 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # The constraint is initialized with plain lists of integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # A branch that is a complete subset of another is rejected.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
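Outside of unittest, the same constraint can be stepped by hand; this sketch mirrors the assertions above:

# Sketch: drive a DisjunctiveConstraint manually.
if __name__ == "__main__":
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    print(dc.completed)    # True: the branch [1, 2, 4] was fulfilled
    print(dc.current_seq)  # [1, 2, 4]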
| 356 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_lowerCAmelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
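The segment-id layout produced by create_token_type_ids_from_sequences can be verified with plain lists; the token ids below are made up for illustration:

# Sketch: BERT-style segment ids for a sequence pair, computed by hand.
cls_id, sep_id = 101, 102
seq_a = [7, 8, 9]
seq_b = [10, 11]
token_type_ids = len([cls_id] + seq_a + [sep_id]) * [0] + len(seq_b + [sep_id]) * [1]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]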
| 202 | 0 |
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond: a pyramid of stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond: an inverted pyramid of stars."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the whole diamond, or a friendly message for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
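For reference, a non-interactive demo; the expected diamond for n = 3 is shown in the comments:

def _demo():
    # pretty_print(3) writes:
    #   *
    #  * *
    # * * *
    # * * *
    #  * *
    #   *
    pretty_print(3)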
| 18 |
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
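These entry points are what torch.hub resolves by name; a hedged usage sketch (the repository spec and model id below are illustrative assumptions):

# Sketch: loading through torch.hub, which dispatches to the functions above.
if __name__ == "__main__":
    import torch

    tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
    mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")
    print(mdl.config.hidden_size)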
| 101 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
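The same benchmark can be driven outside unittest; this sketch mirrors the arguments exercised above (it downloads a tiny model, so run it deliberately):

# Usage sketch of the benchmark runner.
if __name__ == "__main__":
    benchmark_args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    results = TensorFlowBenchmark(benchmark_args).run()
    print(results.time_inference_result)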
| 361 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
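The lazy-import pattern above can be reduced to its essentials: expose names from a mapping and import the backing module only on first attribute access. A self-contained sketch (class and names are illustrative, not the transformers implementation):

# Minimal lazy-module sketch.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(submodule)  # imported lazily, here
                return getattr(module, attr)
        raise AttributeError(attr)


lazy = TinyLazyModule("demo", {"json": ["loads"]})
print(lazy.loads("[1, 2]"))  # json is only imported on this access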
| 87 | 0 |
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
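The prefix-renaming rule in the weights branch is easy to check on a toy state dict (values and keys below are made up for illustration):

# Sketch: apply the first-segment renaming rule to fake checkpoint keys.
key_map = {"mid": "mid_block", "downsample_blocks": "down_blocks"}
state_dict = {"mid.conv.weight": 1, "downsample_blocks.0.bias": 2, "other.weight": 3}

renamed = {}
for key, value in state_dict.items():
    prefix = key.split(".")[0]
    new_prefix = key_map.get(prefix, prefix)  # only the first dotted segment is mapped
    renamed[".".join([new_prefix] + key.split(".")[1:])] = value

print(renamed)
# {'mid_block.conv.weight': 1, 'down_blocks.0.bias': 2, 'other.weight': 3}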
| 300 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 300 | 1 |
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Estimate the solution of dy/dx = f(x, y) with y(x0) = y0 using classic RK4 steps of size h."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
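A quick usage check with the function defined above: integrating dy/dx = y from x = 0 to x = 1 should give y(1) close to e.

# Usage sketch for runge_kutta.
if __name__ == "__main__":
    y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    print(y[-1])  # ~2.71828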
| 258 |
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
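For reference, a hedged usage sketch (the script file name below is illustrative): fire maps CLI arguments onto convert()'s parameters, so the two calls are equivalent.

# Usage sketch:
#   python convert_model_to_fp16.py pytorch_model.bin --save_path model_fp16.bin
# is equivalent to:
#   convert("pytorch_model.bin", save_path="model_fp16.bin")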
| 258 | 1 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(f"{directory}/__init__.py", f"{model_dir}/__init__.py")
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
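The temp-file splice inside replace() is a reusable pattern on its own: copy line by line, append below a marker, then atomically swap while keeping permissions. A self-contained sketch (function and file names are illustrative):

# Standalone sketch of the mkstemp/copymode/move splice.
import os
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp


def splice_below(path, marker, new_lines):
    fh, tmp_path = mkstemp()
    with fdopen(fh, "w") as new_file, open(path) as old_file:
        for line in old_file:
            new_file.write(line)
            if marker in line:
                new_file.writelines(new_lines)
    copymode(path, tmp_path)  # keep the original permissions
    remove(path)
    move(tmp_path, path)


if __name__ == "__main__":
    with open("demo.txt", "w") as f:
        f.write("a\n# anchor\nb\n")
    splice_below("demo.txt", "# anchor", ["inserted\n"])
    print(open("demo.txt").read())  # a / # anchor / inserted / b
    os.remove("demo.txt")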
| 208 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
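A hedged usage sketch, assuming the public checkpoint below exists on the Hub: the language codes drive the suffix tokens configured above, so encoded sequences end with the end-of-sequence token followed by the language code.

# Usage sketch for the language-code handling.
if __name__ == "__main__":
    from transformers import MBartTokenizerFast as _Tok

    tok = _Tok.from_pretrained("facebook/mbart-large-en-ro")
    tok.src_lang = "en_XX"
    enc = tok("Hello world")
    print(tok.convert_ids_to_tokens(enc["input_ids"]))  # ..., '</s>', 'en_XX'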
| 208 | 1 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big- to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as eight little-endian hex characters."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Turn the message into a bit string and apply MD5 padding."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the padded bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit value left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as 32 hex characters (bytes)."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
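A lightweight sanity check against the standard library (the helper below is new, not part of the original module): for a correct MD5, the two hex digests agree, e.g. the MD5 of the empty string is d41d8cd98f00b204e9800998ecf8427e.

import hashlib


def _cross_check(message: bytes = b"") -> bool:
    # Both sides are 32 hex characters encoded as bytes.
    return md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")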
if __name__ == "__main__":
import doctest
doctest.testmod()
| 49 |
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 protocol and return a sifted key of length `key_len`."""
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
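The classical sifting step can be illustrated without any quantum simulation: only positions where Alice's and Bob's random bases agree contribute key bits, which is why the function above prepares 6 * key_len qubits. A pure-NumPy sketch:

# Sketch: basis sifting rate, no qiskit required.
import numpy as np

_rng = np.random.default_rng(seed=0)
_alice = _rng.integers(2, size=48)
_bob = _rng.integers(2, size=48)
kept = int((_alice == _bob).sum())
print(f"{kept} of 48 positions survive sifting (about half on average)")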
| 49 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
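
# Hedged sketch (helper names are illustrative, not part of the config class
# above) of how EfficientNet-style compound scaling is conventionally applied:
# channel counts are multiplied by width_coefficient and snapped to a multiple
# of depth_divisor; block repeat counts are scaled by depth_coefficient and
# rounded up.
import math


def round_filters(channels: int, width_coefficient: float, depth_divisor: int = 8) -> int:
    scaled = channels * width_coefficient
    new_channels = max(depth_divisor, int(scaled + depth_divisor / 2) // depth_divisor * depth_divisor)
    if new_channels < 0.9 * scaled:  # never round down by more than 10%
        new_channels += depth_divisor
    return int(new_channels)


def round_repeats(repeats: int, depth_coefficient: float) -> int:
    return int(math.ceil(depth_coefficient * repeats))


if __name__ == "__main__":
    print(round_filters(32, 2.0))  # 64
    print(round_repeats(3, 3.1))  # 10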
| 247 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def __magic_name__ ( self : Any ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def __magic_name__ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __magic_name__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __magic_name__ ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __magic_name__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __magic_name__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip(reason="UperNet does not have tied weights" )
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
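
# Hedged post-processing sketch: the tests above assert logits of shape
# (batch, num_labels, height, width); a per-pixel segmentation map is simply
# the argmax over the label dimension.
if __name__ == "__main__":
    demo_logits = torch.randn(1, 150, 512, 512)  # stand-in for outputs.logits
    demo_seg_map = demo_logits.argmax(dim=1)  # (1, 512, 512) class ids per pixel
    print(demo_seg_map.shape)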
| 247 | 1 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
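
    # Toy numeric illustration of the update rule applied by convert() above,
    # W <- W + alpha * (up @ down); the shapes here are illustrative only.
    demo_w = torch.zeros(16, 32)
    demo_up = torch.randn(16, 4)  # rank-4 LoRA factors
    demo_down = torch.randn(4, 32)
    demo_w += 0.75 * torch.mm(demo_up, demo_down)
    print(demo_w.shape)  # torch.Size([16, 32])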
| 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = self.get_auto_remove_tmp_dir()
__snake_case : Dict = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__snake_case : List[Any] = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : str = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
self.assertLess(result['perplexity'] , 100 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : str ) -> List[str]:
'''simple docstring'''
__snake_case : int = self.get_auto_remove_tmp_dir()
__snake_case : List[str] = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : List[str] = get_results(__a )
self.assertLess(result['perplexity'] , 42 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__snake_case : Any = 7 if get_gpu_count() > 1 else 2
__snake_case : Any = self.get_auto_remove_tmp_dir()
__snake_case : int = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : Dict = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> List[Any]:
'''simple docstring'''
__snake_case : Any = self.get_auto_remove_tmp_dir()
__snake_case : Tuple = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 28 )
self.assertGreaterEqual(result['eval_exact'] , 28 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
__snake_case : str = self.get_auto_remove_tmp_dir()
__snake_case : Any = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : str = get_results(__a )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__a , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : List[str] = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : int = get_results(__a )
self.assertGreaterEqual(result['eval_rouge1'] , 10 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
__snake_case : Tuple = self.get_auto_remove_tmp_dir()
__snake_case : str = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__snake_case : Dict = get_results(__a )
self.assertGreaterEqual(result['eval_bleu'] , 30 )
self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'translation_no_trainer' ) ) )
@slow
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(__a )
__snake_case : List[str] = self.get_auto_remove_tmp_dir()
__snake_case : int = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
__snake_case : List[str] = get_results(__a )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
__snake_case : Dict = self.get_auto_remove_tmp_dir()
__snake_case : Dict = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__snake_case : Optional[int] = get_results(__a )
# The base model scores a 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__a , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(__a , 'image_classification_no_trainer' ) ) )
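
# Hedged sketch of the contract get_results() relies on: each example script
# writes its final metrics to <output_dir>/all_results.json.
if __name__ == "__main__":
    demo_dir = tempfile.mkdtemp()
    with open(os.path.join(demo_dir, "all_results.json"), "w") as fh:
        json.dump({"eval_accuracy": 0.8, "train_loss": 0.4}, fh)
    print(get_results(demo_dir)["eval_accuracy"])  # 0.8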
| 0 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
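
# Minimal standalone sketch of the lazy-import idea behind _LazyModule above:
# attribute access triggers the real import, so importing the package stays cheap.
if __name__ == "__main__":
    import importlib

    class _TinyLazyModule:
        def __init__(self, attr_to_module):
            self._attr_to_module = attr_to_module

        def __getattr__(self, name):
            module = importlib.import_module(self._attr_to_module[name])
            return getattr(module, name)

    lazy = _TinyLazyModule({"sqrt": "math"})
    print(lazy.sqrt(9.0))  # 3.0 -- `math` is imported only at this point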
| 327 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # Note: the original class name is not recoverable from this excerpt;
    # "ImageProcessor" is a stand-in.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
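
# Hedged numpy sketch of the rescale + normalize steps chained in preprocess():
# scale pixel values by 1/255, then standardize with the per-channel
# IMAGENET_STANDARD mean/std (0.5 each).
if __name__ == "__main__":
    demo = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
    demo = demo * (1 / 255)
    demo = (demo - np.array([0.5, 0.5, 0.5])) / np.array([0.5, 0.5, 0.5])
    print(demo.shape, float(demo.mean()))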
| 327 | 1 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
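
    # Quick sanity check of the DP above: for dims [10, 20, 30] (a 10x20
    # matrix times a 20x30 matrix) the single possible parenthesization costs
    # 10 * 20 * 30 = 6000 scalar multiplications.
    demo_matrix, _ = matrix_chain_order([10, 20, 30])
    print(demo_matrix[1][2])  # 6000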
| 78 | from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print('''\n'''.join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
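
    # Quick offline illustration of the olid validation rule above: exactly
    # one "/" must remain after stripping leading/trailing slashes.
    for candidate in ("isbn/0140328726", "wrong-olid"):
        print(candidate, candidate.strip().strip("/").count("/") == 1)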
| 78 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__UpperCamelCase = logging.get_logger(__name__)
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> None:
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
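
# Minimal sketch (illustrative class names, not transformers API) of the
# deprecation-alias pattern used above: the old class keeps working but warns
# on construction.
class _NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size


class _OldProcessor(_NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class _OldProcessor is deprecated. Please use _NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


if __name__ == "__main__":
    _OldProcessor()  # emits a FutureWarning, then behaves like _NewProcessor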
| 113 |
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 113 | 1 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)


class SVC:
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
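
    # Hedged usage sketch, mirroring the reference doctest for this
    # classifier: four 2-D points, linearly separable by the first coordinate.
    xs = [np.asarray([0, 1]), np.asarray([0, 2]), np.asarray([1, 1]), np.asarray([1, 2])]
    ys = np.asarray([-1, -1, 1, 1])
    svc = SVC(kernel="linear")
    svc.fit(xs, ys)
    print(svc.predict(np.asarray([0, 1])))  # expected: -1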
| 269 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 269 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
SCREAMING_SNAKE_CASE__ : List[str] = 'bert-base-cased'
SCREAMING_SNAKE_CASE__ : List[Any] = 'fp16'
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'bf16'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
'''simple docstring'''
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
def _lowercase ( self ) -> Optional[int]:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(UpperCamelCase__ ):
lowerCamelCase : List[Any] = self.dist_env.copy()
lowerCamelCase : Optional[Any] = F'''{i + 1}'''
lowerCamelCase : str = strategy
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def _lowercase ( self ) -> int:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(UpperCamelCase__ ):
lowerCamelCase : Optional[Any] = self.dist_env.copy()
lowerCamelCase : List[str] = prefetch_policy
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase : Optional[Any] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _lowercase ( self ) -> Tuple:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(UpperCamelCase__ ):
lowerCamelCase : List[str] = self.dist_env.copy()
lowerCamelCase : Optional[Any] = state_dict_type
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase : List[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Optional[int] = AutoModel.from_pretrained(UpperCamelCase__ )
for policy in FSDP_AUTO_WRAP_POLICY:
lowerCamelCase : Union[str, Any] = self.dist_env.copy()
lowerCamelCase : Tuple = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowerCamelCase : Dict = "BertLayer"
elif policy == "SIZE_BASED_WRAP":
lowerCamelCase : Optional[int] = "2000"
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
lowerCamelCase : Optional[Any] = self.dist_env.copy()
lowerCamelCase : List[Any] = "TRANSFORMER_BASED_WRAP"
lowerCamelCase : List[Any] = "T5Layer"
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase : Optional[int] = FullyShardedDataParallelPlugin()
with self.assertRaises(UpperCamelCase__ ) as cm:
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
lowerCamelCase : List[Any] = self.dist_env.copy()
lowerCamelCase : Any = "SIZE_BASED_WRAP"
lowerCamelCase : Tuple = "0"
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase : List[str] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(UpperCamelCase__ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _lowercase ( self ) -> Tuple:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowerCamelCase : str = self.dist_env.copy()
lowerCamelCase : List[Any] = mp_dtype
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase : List[Any] = Accelerator()
if mp_dtype == "fp16":
lowerCamelCase : Any = torch.floataa
elif mp_dtype == "bf16":
lowerCamelCase : List[Any] = torch.bfloataa
lowerCamelCase : str = MixedPrecision(param_dtype=UpperCamelCase__ , reduce_dtype=UpperCamelCase__ , buffer_dtype=UpperCamelCase__ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , UpperCamelCase__ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , UpperCamelCase__ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(UpperCamelCase__ )
def _lowercase ( self ) -> Tuple:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowerCamelCase : Optional[Any] = self.dist_env.copy()
lowerCamelCase : Optional[int] = str(UpperCamelCase__ ).lower()
with mockenv_context(**UpperCamelCase__ ):
lowerCamelCase : Tuple = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=UpperCamelCase__ ) )
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
'''simple docstring'''
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = os.path.join(self.test_scripts_folder , "test_performance.py" )
lowerCamelCase : Union[str, Any] = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
lowerCamelCase : int = cmd.copy()
for i, strategy in enumerate(UpperCamelCase__ ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("--mixed_precision=no" )
else:
cmd_config.append("--mixed_precision=fp16" )
if "cpu_offload" in config:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_checkpointing( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_checkpointing.py" )
        cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
"--use_fsdp",
"--mixed_precision=fp16",
"--fsdp_transformer_layer_cls_to_wrap=BertLayer",
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
            cmd_config = cmd.copy()
            cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
"--partial_train_epoch=1",
] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(cmd_config , env=os.environ.copy() )
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir , "epoch_0" )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
    def test_peak_memory_usage( self ):
        self.test_file_path = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" )
        cmd = [
"accelerate",
"launch",
"--num_processes=2",
"--num_machines=1",
"--machine_rank=0",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["--mixed_precision=fp16"] )
else:
cmd_config.extend(["--mixed_precision=no"] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["--use_fsdp"] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("--fsdp_offload_params=True" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("--fsdp_min_num_params=2000" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config , env=os.environ.copy() )
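For reference, a minimal sketch of the `accelerate launch` command these tests assemble; only flags that appear above are used, and the script name and output directory are hypothetical placeholders.

launch_cmd = [
    "accelerate", "launch",
    "--num_processes=2", "--num_machines=1", "--machine_rank=0",
    "--use_fsdp",
    "--mixed_precision=fp16",
    "--fsdp_sharding_strategy=1",  # 1 corresponds to the first sharding strategy (FULL_SHARD)
    "--fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP",
    "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
    "my_training_script.py",  # hypothetical training script
    "--output_dir=out",  # hypothetical output directory
]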
| 48 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
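The Blenderbot `__init__` above defers heavy framework imports through `_LazyModule`. Below is a minimal sketch of the same lazy-import idea using plain PEP 562 module-level `__getattr__`; it mirrors the structure of the file above but is illustrative, not the transformers implementation.

import importlib

_import_structure = {'tokenization_blenderbot': ['BlenderbotTokenizer']}

def __getattr__(name):
    # import the owning submodule only on first attribute access
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module('.' + module_name, __name__)
            return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')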
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests ( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf( self ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
__UpperCAmelCase = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 3_80_15, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 2_55_06, '''token_str''': ''' accuser'''},
] , )
__UpperCAmelCase = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 3_80_15,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 2_55_06,
'''token_str''': ''' accuser''',
},
] , )
__UpperCAmelCase = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 29_41, '''token_str''': ''' Te'''},
] , )
@require_torch
    def test_small_model_pt( self ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
__UpperCAmelCase = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 3_56_76, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS'''},
] , )
__UpperCAmelCase = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS'''},
] , )
__UpperCAmelCase = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 29_41, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
] , )
__UpperCAmelCase = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
    def test_fp16_casting( self ):
__UpperCAmelCase = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
__UpperCAmelCase = pipe('''Paris is the [MASK] of France.''' )
        # We actually don't care about the result; we just want to make sure
        # it works, i.e. that the float16 tensor got cast back to float32
        # for postprocessing.
self.assertIsInstance(_lowercase , _lowercase )
@slow
@require_torch
    def test_large_model_pt( self ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(_lowercase )
@slow
@require_tf
    def test_large_model_tf( self ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(_lowercase )
    def run_large_test( self , unmasker ):
__UpperCAmelCase = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase ) , [
{'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 6_10, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 15_73, '''token_str''': ''' Chris'''},
] , )
__UpperCAmelCase = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.251,
'''token''': 22_01,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.214,
'''token''': 1_27_90,
'''token_str''': ''' Lyon''',
},
] , )
__UpperCAmelCase = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 29_41, '''token_str''': ''' Te'''},
] , )
@require_torch
    def test_model_no_pad_pt( self ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
__UpperCAmelCase = None
__UpperCAmelCase = None
self.run_pipeline_test(_lowercase , [] )
@require_tf
    def test_model_no_pad_tf( self ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
__UpperCAmelCase = None
__UpperCAmelCase = None
self.run_pipeline_test(_lowercase , [] )
    def get_test_pipeline( self , model , tokenizer , processor ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
    def run_pipeline_test( self , fill_masker , examples ):
__UpperCAmelCase = fill_masker.tokenizer
__UpperCAmelCase = fill_masker.model
__UpperCAmelCase = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_lowercase , [
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
] , )
with self.assertRaises(_lowercase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_lowercase ):
fill_masker('''This is''' )
self.run_test_top_k(_lowercase , _lowercase )
self.run_test_targets(_lowercase , _lowercase )
self.run_test_top_k_targets(_lowercase , _lowercase )
self.fill_mask_with_duplicate_targets_and_top_k(_lowercase , _lowercase )
self.fill_mask_with_multiple_masks(_lowercase , _lowercase )
    def run_test_targets( self , model , tokenizer ):
__UpperCAmelCase = tokenizer.get_vocab()
__UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase , targets=_lowercase )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , _lowercase )
__UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(_lowercase ) )
# Call argument
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , _lowercase )
__UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(_lowercase ) )
# Score equivalence
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
__UpperCAmelCase = [top_mask['''token_str'''] for top_mask in outputs]
__UpperCAmelCase = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowercase ) == set(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
__UpperCAmelCase = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
# Raises with invalid
with self.assertRaises(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[''''''] )
with self.assertRaises(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets='''''' )
    def run_test_top_k( self , model , tokenizer ):
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase , top_k=2 )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
    def run_test_top_k_targets( self , model , tokenizer ):
__UpperCAmelCase = tokenizer.get_vocab()
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
# top_k=2, ntargets=3
__UpperCAmelCase = sorted(vocab.keys() )[:3]
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_lowercase )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        __UpperCAmelCase = [el['''token_str'''] for el in sorted(_lowercase , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowercase ).issubset(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_lowercase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
    def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ):
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
__UpperCAmelCase = sorted(vocab.keys() )[:3]
__UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__UpperCAmelCase = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_lowercase , top_k=10 )
        # The target list contains duplicates, so we can't get more
        # unique predictions than there are unique targets
self.assertEqual(len(_lowercase ) , 3 )
    def fill_mask_with_multiple_masks( self , model , tokenizer ):
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowercase , [
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
] , )
| 86 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
a__ : Optional[int] = MODEL_FOR_MASKED_LM_MAPPING
a__ : Tuple = TF_MODEL_FOR_MASKED_LM_MAPPING
def a ( self : List[str] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def a ( self : Tuple ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
__UpperCAmelCase = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 3_80_15, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 2_55_06, '''token_str''': ''' accuser'''},
] , )
__UpperCAmelCase = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 3_80_15,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 2_55_06,
'''token_str''': ''' accuser''',
},
] , )
__UpperCAmelCase = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 29_41, '''token_str''': ''' Te'''},
] , )
@require_torch
def a ( self : Optional[int] ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
__UpperCAmelCase = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 3_56_76, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS'''},
] , )
__UpperCAmelCase = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS'''},
] , )
__UpperCAmelCase = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 29_41, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
] , )
__UpperCAmelCase = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 3_56_76,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
def a ( self : Any ):
__UpperCAmelCase = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
__UpperCAmelCase = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(_lowercase , _lowercase )
@slow
@require_torch
def a ( self : int ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(_lowercase )
@slow
@require_tf
def a ( self : Optional[Any] ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(_lowercase )
def a ( self : Dict , _lowercase : str ):
__UpperCAmelCase = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase ) , [
{'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 6_10, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 15_73, '''token_str''': ''' Chris'''},
] , )
__UpperCAmelCase = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(_lowercase ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.251,
'''token''': 22_01,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.214,
'''token''': 1_27_90,
'''token_str''': ''' Lyon''',
},
] , )
__UpperCAmelCase = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(_lowercase ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 34_99, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 1_36_06, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 29_41, '''token_str''': ''' Te'''},
] , )
@require_torch
def a ( self : List[Any] ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
__UpperCAmelCase = None
__UpperCAmelCase = None
self.run_pipeline_test(_lowercase , [] )
@require_tf
def a ( self : str ):
__UpperCAmelCase = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
__UpperCAmelCase = None
__UpperCAmelCase = None
self.run_pipeline_test(_lowercase , [] )
def a ( self : Optional[int] , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : Tuple ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def a ( self : int , _lowercase : Tuple , _lowercase : Tuple ):
__UpperCAmelCase = fill_masker.tokenizer
__UpperCAmelCase = fill_masker.model
__UpperCAmelCase = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
_lowercase , [
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
] , )
with self.assertRaises(_lowercase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(_lowercase ):
fill_masker('''This is''' )
self.run_test_top_k(_lowercase , _lowercase )
self.run_test_targets(_lowercase , _lowercase )
self.run_test_top_k_targets(_lowercase , _lowercase )
self.fill_mask_with_duplicate_targets_and_top_k(_lowercase , _lowercase )
self.fill_mask_with_multiple_masks(_lowercase , _lowercase )
def a ( self : Optional[Any] , _lowercase : Optional[Any] , _lowercase : List[Any] ):
__UpperCAmelCase = tokenizer.get_vocab()
__UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase , targets=_lowercase )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , _lowercase )
__UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(_lowercase ) )
# Call argument
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el['''token'''] for el in outputs} , _lowercase )
__UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['''token_str'''] for el in outputs} , set(_lowercase ) )
# Score equivalence
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
__UpperCAmelCase = [top_mask['''token_str'''] for top_mask in outputs]
__UpperCAmelCase = [top_mask['''score'''] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowercase ) == set(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=_lowercase )
__UpperCAmelCase = [top_mask['''score'''] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
# Raises with invalid
with self.assertRaises(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[''''''] )
with self.assertRaises(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets='''''' )
def a ( self : List[Any] , _lowercase : Tuple , _lowercase : Optional[Any] ):
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase , top_k=2 )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowercase , [
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
] , )
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
def a ( self : Optional[int] , _lowercase : int , _lowercase : Tuple ):
__UpperCAmelCase = tokenizer.get_vocab()
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
# top_k=2, ntargets=3
__UpperCAmelCase = sorted(vocab.keys() )[:3]
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=_lowercase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
__UpperCAmelCase = [el['''token_str'''] for el in sorted(_lowercase , key=lambda _lowercase : x["score"] , reverse=_lowercase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(_lowercase ).issubset(_lowercase ):
__UpperCAmelCase = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=_lowercase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(_lowercase ) , nested_simplify(_lowercase ) )
def a ( self : Union[str, Any] , _lowercase : Tuple , _lowercase : Union[str, Any] ):
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
__UpperCAmelCase = sorted(vocab.keys() )[:3]
__UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__UpperCAmelCase = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=_lowercase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(_lowercase ) , 3 )
def a ( self : Dict , _lowercase : Dict , _lowercase : Any ):
__UpperCAmelCase = FillMaskPipeline(model=_lowercase , tokenizer=_lowercase )
__UpperCAmelCase = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
_lowercase , [
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
[
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
{'''sequence''': ANY(_lowercase ), '''score''': ANY(_lowercase ), '''token''': ANY(_lowercase ), '''token_str''': ANY(_lowercase )},
],
] , )
| 86 | 1 |
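For context, a minimal sketch of driving the fill-mask pipeline these tests exercise; the model name is taken from the tests, and the printed scores and tokens will vary.

from transformers import pipeline

unmasker = pipeline(task='fill-mask', model='sshleifer/tiny-distilroberta-base', top_k=2)
outputs = unmasker('My name is <mask>')
for prediction in outputs:
    # each prediction carries the filled-in sequence, its score, and token info
    print(prediction['sequence'], prediction['score'], prediction['token_str'])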
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config( swinv2_name ):
    '''simple docstring'''
    config = Swinv2Config()
    name_split = swinv2_name.split('''_''' )
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:] )
    else:
        img_size = int(name_split[3] )
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:] )
    else:
        window_size = int(name_split[2][6:] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21_841
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-22k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1_000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key( name ):
    '''simple docstring'''
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "q_bias" in name:
        name = name.replace('''q_bias''' , '''query.bias''' )
    if "k_bias" in name:
        name = name.replace('''k_bias''' , '''key.bias''' )
    if "v_bias" in name:
        name = name.replace('''v_bias''' , '''value.bias''' )
    if "cpb_mlp" in name:
        name = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''' , '''classifier''' )
    else:
        name = '''swinv2.''' + name
    return name
def convert_state_dict( orig_state_dict , model ):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # the fused timm qkv projection is split into separate q/k/v tensors
            if "weight" in key:
                orig_state_dict[F'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''] = val[:dim, :]
                orig_state_dict[F'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[F'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[F'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''] = val[:dim]
                orig_state_dict[F'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''] = val[dim : dim * 2]
                orig_state_dict[F'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_swinv2_checkpoint( swinv2_name , pytorch_dump_folder_path ):
    '''simple docstring'''
    timm_model = timm.create_model(swinv2_name , pretrained=True )
    timm_model.eval()
    config = get_swinv2_config(swinv2_name )
    model = Swinv2ForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinv2_name.replace('''_''' , '''-''' ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    timm_outs = timm_model(inputs['''pixel_values'''] )
    outputs = model(**inputs ).logits
    assert torch.allclose(timm_outs , outputs , atol=1e-3 )
    print(F'''Saving model {swinv2_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path , swinv2_name ) , organization='''nandwalritik''' , commit_message='''Add model''' , )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 30 |
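The `convert_state_dict` helper above splits timm's fused `qkv` projection into separate query/key/value tensors. A self-contained sketch of that slicing, with an illustrative width:

import torch

dim = 4  # per-projection width; illustrative only
qkv_weight = torch.randn(3 * dim, dim)  # fused [q; k; v] rows, as in timm checkpoints
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)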
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class AudioClassification (TaskTemplate ):
    task: str = field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''audio''': Audio()} )
    label_schema: ClassVar[Features] = Features({'''labels''': ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(F'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        # the dataclass is frozen, so write the updated schema through __dict__
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 240 | 0 |
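`align_with_features` above copies a frozen dataclass and then writes through `__dict__`, because frozen instances reject normal attribute assignment. A minimal sketch of that pattern with a hypothetical template class:

import copy
from dataclasses import dataclass

@dataclass(frozen=True)
class Template:  # hypothetical stand-in for a task template
    label_column: str = 'labels'

original = Template()
updated = copy.deepcopy(original)
# direct assignment would raise FrozenInstanceError, so write through __dict__
updated.__dict__['label_column'] = 'targets'
assert original.label_column == 'labels' and updated.label_column == 'targets'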
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest ( SchedulerCommonTest ):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 25),)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
        config.update(**kwargs )
return config
    def check_over_configs( self , time_step=0 , **config ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        """simple docstring"""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_switch( self ):
        """simple docstring"""
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_464 ) < 1E-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_464 ) < 1E-3
def _snake_case ( self ):
"""simple docstring"""
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
def _snake_case ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def _snake_case ( self ):
"""simple docstring"""
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    lowercase_ : Union[str, Any] = self.full_loop(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def _snake_case ( self ):
"""simple docstring"""
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def _snake_case ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.full_loop()
lowercase_ : Union[str, Any] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.2_464 ) < 1E-3
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = self.full_loop(prediction_type='''v_prediction''' )
lowercase_ : Optional[int] = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 0.1_014 ) < 1E-3
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = self.scheduler_classes[0]
lowercase_ : Tuple = self.get_scheduler_config(thresholding=__SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
lowercase_ : int = scheduler_class(**__SCREAMING_SNAKE_CASE )
lowercase_ : Any = 10
lowercase_ : Union[str, Any] = self.dummy_model()
lowercase_ : Tuple = self.dummy_sample_deter.half()
scheduler.set_timesteps(__SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ : str = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : str = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).prev_sample
        assert sample.dtype == torch.float16
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for scheduler_class in self.scheduler_classes:
lowercase_ : List[str] = self.get_scheduler_config(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = scheduler_class(**__SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
| 264 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class lowerCAmelCase__ ( logging.LoggerAdapter ):
@staticmethod
def _snake_case ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Optional[Any] = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
lowercase_ : Tuple = kwargs.pop('''main_process_only''' , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = kwargs.pop('''in_order''' , __SCREAMING_SNAKE_CASE )
if self.isEnabledFor(__SCREAMING_SNAKE_CASE ):
if self._should_log(__SCREAMING_SNAKE_CASE ):
lowercase_ , lowercase_ : Optional[Any] = self.process(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.logger.log(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
elif in_order:
lowercase_ : Optional[Any] = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowercase_ , lowercase_ : Optional[int] = self.process(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.logger.log(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
state.wait_for_everyone()
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str = None ):
"""simple docstring"""
if log_level is None:
lowercase_ : Any = os.environ.get('''ACCELERATE_LOG_LEVEL''' , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = logging.getLogger(__SCREAMING_SNAKE_CASE )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__SCREAMING_SNAKE_CASE , {} )
| 264 | 1 |
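
A short usage sketch for the adapter above, assuming an `accelerate` launch: `get_logger` returns the `MultiProcessAdapter`, which logs only on the main process by default and can serialize output across ranks with `in_order`:

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes PartialState; required before logging, per the RuntimeError above
logger = get_logger(__name__, log_level="INFO")

logger.info("emitted once, on the main process")
logger.info("emitted on every process", main_process_only=False)
logger.info("emitted rank by rank", main_process_only=False, in_order=True)
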
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=False , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.0_2 , __a=3 , __a=4 , __a=None , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
def snake_case ( self ):
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCAmelCase = BioGptModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase )
__lowerCAmelCase = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ):
__lowerCAmelCase = BioGptForCausalLM(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , __a , __a , __a , __a , __a , *__a ):
__lowerCAmelCase = BioGptModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
# create attention mask
__lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__lowerCamelCase )
__lowerCAmelCase = self.seq_length // 2
__lowerCAmelCase = 0
# first forward pass
__lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__lowerCAmelCase = ids_tensor((1,) , __lowerCamelCase ).item() + 1
__lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__lowerCAmelCase = random_other_next_tokens
# append to next input_ids and attn_mask
__lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__lowerCamelCase )] , dim=1 , )
# get two different outputs
__lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase )["last_hidden_state"]
__lowerCAmelCase = model(__lowerCamelCase , past_key_values=__lowerCamelCase , attention_mask=__lowerCamelCase )["last_hidden_state"]
# select random slice
__lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
__lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def snake_case ( self , __a , __a , __a , __a , __a , *__a ):
__lowerCAmelCase = BioGptModel(config=__lowerCamelCase ).to(__lowerCamelCase ).eval()
__lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__lowerCamelCase )
# first forward pass
__lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
__lowerCAmelCase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCAmelCase = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
__lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase )["last_hidden_state"]
__lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[
"last_hidden_state"
]
# select random slice
__lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3 ) )
def snake_case ( self , __a , __a , __a , __a , __a , *__a , __a=False ):
__lowerCAmelCase = BioGptForCausalLM(__lowerCamelCase )
model.to(__lowerCamelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__lowerCAmelCase = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case ( self , __a , *__a ):
__lowerCAmelCase = BioGptModel(__lowerCamelCase )
__lowerCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def snake_case ( self , __a , __a , __a , __a , __a , *__a ):
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = BioGptForTokenClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int =(
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__UpperCAmelCase : Dict =(BioGptForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase : Optional[Any] =(
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : List[str] =False
def snake_case ( self ):
__lowerCAmelCase = BioGptModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 )
def snake_case ( self ):
self.config_tester.run_common_tests()
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase = type
self.model_tester.create_and_check_model(*__lowerCamelCase )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__lowerCamelCase )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__lowerCamelCase , gradient_checkpointing=__lowerCamelCase )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__lowerCamelCase )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__lowerCamelCase )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__lowerCamelCase )
@slow
def snake_case ( self ):
__lowerCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(__lowerCamelCase )
__lowerCAmelCase = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        tokenizer.padding_side = "left"
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
# use different length sentences to test batching
__lowerCAmelCase = [
"Hello, my dog is a little",
"Today, I",
]
__lowerCAmelCase = tokenizer(__lowerCamelCase , return_tensors="pt" , padding=__lowerCamelCase )
__lowerCAmelCase = inputs["input_ids"].to(__lowerCamelCase )
__lowerCAmelCase = model.generate(
input_ids=__lowerCamelCase , attention_mask=inputs["attention_mask"].to(__lowerCamelCase ) , )
__lowerCAmelCase = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(__lowerCamelCase )
__lowerCAmelCase = model.generate(input_ids=__lowerCamelCase )
__lowerCAmelCase = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
__lowerCAmelCase = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(__lowerCamelCase )
__lowerCAmelCase = model.generate(input_ids=__lowerCamelCase , max_length=model.config.max_length - num_paddings )
__lowerCAmelCase = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
__lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase )
__lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase )
__lowerCAmelCase = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = BioGptModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = input_dict["input_ids"]
__lowerCAmelCase = input_ids.ne(1 ).to(__lowerCamelCase )
__lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCAmelCase = BioGptForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = 3
__lowerCAmelCase = "multi_label_classification"
__lowerCAmelCase = input_dict["input_ids"]
__lowerCAmelCase = input_ids.ne(1 ).to(__lowerCamelCase )
__lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowerCAmelCase = BioGptForSequenceClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__lowerCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case ( self ):
__lowerCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
__lowerCAmelCase = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
__lowerCAmelCase = model(__lowerCamelCase )[0]
__lowerCAmelCase = 4_23_84
__lowerCAmelCase = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , __lowerCamelCase )
__lowerCAmelCase = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 10.45_57], [-11.04_69, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
@slow
def snake_case ( self ):
__lowerCAmelCase = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
__lowerCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(__lowerCamelCase )
torch.manual_seed(0 )
__lowerCAmelCase = tokenizer("COVID-19 is" , return_tensors="pt" ).to(__lowerCamelCase )
__lowerCAmelCase = model.generate(
**__lowerCamelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__lowerCamelCase , )
__lowerCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__lowerCamelCase )
__lowerCAmelCase = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
| 57 |
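
The cache tests in the suite above all check one invariant: a decoder run over the full sequence and an incremental run that reuses `past_key_values` must agree on the new positions. A hedged, standalone sketch of that check (running it downloads the checkpoint; the tolerance matches the suite's 1e-3):

import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("microsoft/biogpt").eval()
ids = torch.tensor([[2, 4805, 9, 656, 21]])

with torch.no_grad():
    full = model(ids).logits                              # single pass over everything
    prefix = model(ids[:, :-1], use_cache=True)           # prefix pass, keep the cache
    step = model(ids[:, -1:], past_key_values=prefix.past_key_values)

# the last position's logits must match between the two strategies
assert torch.allclose(full[:, -1], step.logits[:, -1], atol=1e-3)
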
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 0 |
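
The `_LazyModule` indirection above keeps package imports cheap by deferring submodule loading until an attribute is first accessed. The same effect can be approximated without the helper using module-level `__getattr__` (PEP 562); a minimal sketch for a hypothetical package:

# mypkg/__init__.py  (illustrative)
import importlib
from typing import TYPE_CHECKING

_LAZY_ATTRS = {"SpeechT5Config": ".configuration_speecht5"}

if TYPE_CHECKING:  # static type checkers still see the real symbol
    from .configuration_speecht5 import SpeechT5Config

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
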
"""simple docstring"""
def __lowerCamelCase ( a_ : Dict ) -> str:
__SCREAMING_SNAKE_CASE :List[str] = [0] * len(a_ )
__SCREAMING_SNAKE_CASE :int = []
__SCREAMING_SNAKE_CASE :Any = [1] * len(a_ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(a_ ) ):
if indegree[i] == 0:
queue.append(a_ )
while queue:
__SCREAMING_SNAKE_CASE :List[str] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__SCREAMING_SNAKE_CASE :Tuple = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(a_ )
print(max(a_ ) )
# Adjacency list of Graph
lowerCamelCase_ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph) | 358 |
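
For readability, here is the same Kahn's-algorithm longest-path computation with the placeholder assignments spelled out (semantics unchanged; `long_dist` counts vertices on the path, so the expected answer for the graph above is 5, e.g. 0 -> 2 -> 5 -> 6 -> 7):

from collections import deque

def longest_distance_clean(graph):
    indegree = [0] * len(graph)
    long_dist = [1] * len(graph)  # every vertex is a path of length 1 by itself
    for targets in graph.values():
        for v in targets:
            indegree[v] += 1
    queue = deque(v for v in range(len(graph)) if indegree[v] == 0)
    while queue:
        vertex = queue.popleft()
        for nxt in graph[vertex]:
            indegree[nxt] -= 1
            long_dist[nxt] = max(long_dist[nxt], long_dist[vertex] + 1)
            if indegree[nxt] == 0:
                queue.append(nxt)
    return max(long_dist)

assert longest_distance_clean({0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}) == 5
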
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class _SCREAMING_SNAKE_CASE( A ):
SCREAMING_SNAKE_CASE_ : str = '''bart'''
SCREAMING_SNAKE_CASE_ : str = ['''past_key_values''']
SCREAMING_SNAKE_CASE_ : str = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self ,SCREAMING_SNAKE_CASE__=5_02_65 ,SCREAMING_SNAKE_CASE__=10_24 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=40_96 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=40_96 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=10_24 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=2 ,SCREAMING_SNAKE_CASE__=2 ,**SCREAMING_SNAKE_CASE__ ,) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = vocab_size
__SCREAMING_SNAKE_CASE :Union[str, Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE :Any = d_model
__SCREAMING_SNAKE_CASE :Optional[int] = encoder_ffn_dim
__SCREAMING_SNAKE_CASE :List[str] = encoder_layers
__SCREAMING_SNAKE_CASE :Tuple = encoder_attention_heads
__SCREAMING_SNAKE_CASE :List[Any] = decoder_ffn_dim
__SCREAMING_SNAKE_CASE :Any = decoder_layers
__SCREAMING_SNAKE_CASE :Optional[int] = decoder_attention_heads
__SCREAMING_SNAKE_CASE :Optional[Any] = dropout
__SCREAMING_SNAKE_CASE :Optional[Any] = attention_dropout
__SCREAMING_SNAKE_CASE :Dict = activation_dropout
__SCREAMING_SNAKE_CASE :Union[str, Any] = activation_function
__SCREAMING_SNAKE_CASE :Union[str, Any] = init_std
__SCREAMING_SNAKE_CASE :int = encoder_layerdrop
__SCREAMING_SNAKE_CASE :Any = decoder_layerdrop
__SCREAMING_SNAKE_CASE :str = classifier_dropout
__SCREAMING_SNAKE_CASE :List[str] = use_cache
__SCREAMING_SNAKE_CASE :List[str] = encoder_layers
__SCREAMING_SNAKE_CASE :Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=SCREAMING_SNAKE_CASE__ ,pad_token_id=SCREAMING_SNAKE_CASE__ ,bos_token_id=SCREAMING_SNAKE_CASE__ ,eos_token_id=SCREAMING_SNAKE_CASE__ ,is_encoder_decoder=SCREAMING_SNAKE_CASE__ ,decoder_start_token_id=SCREAMING_SNAKE_CASE__ ,forced_eos_token_id=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' ,SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :str = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
class _SCREAMING_SNAKE_CASE( A ):
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE :int = {0: '''batch'''}
__SCREAMING_SNAKE_CASE :int = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE :Tuple = {0: '''batch''', 1: '''decoder_sequence'''}
__SCREAMING_SNAKE_CASE :Any = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__SCREAMING_SNAKE_CASE :Optional[int] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :List[Any] = self.num_layers
for i in range(SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE :List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__SCREAMING_SNAKE_CASE :int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :str = super().outputs
else:
__SCREAMING_SNAKE_CASE :List[str] = super(SCREAMING_SNAKE_CASE__ ,self ).outputs
if self.use_past:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = self.num_layers
for i in range(SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :str = {0: '''batch''', 2: '''past_sequence + sequence'''}
__SCREAMING_SNAKE_CASE :Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
# Generate decoder inputs
__SCREAMING_SNAKE_CASE :Union[str, Any] = seq_length if not self.use_past else 1
__SCREAMING_SNAKE_CASE :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__SCREAMING_SNAKE_CASE :Any = dict(**SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = common_inputs['''input_ids'''].shape
__SCREAMING_SNAKE_CASE :Optional[Any] = common_inputs['''decoder_input_ids'''].shape[1]
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = self.num_attention_heads
__SCREAMING_SNAKE_CASE :Union[str, Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE :Optional[int] = decoder_seq_length + 3
__SCREAMING_SNAKE_CASE :Tuple = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__SCREAMING_SNAKE_CASE :Union[str, Any] = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )] ,dim=1 )
__SCREAMING_SNAKE_CASE :Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Dict = self.num_layers
__SCREAMING_SNAKE_CASE :int = min(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Any = max(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) - min_num_layers
__SCREAMING_SNAKE_CASE :int = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
torch.zeros(SCREAMING_SNAKE_CASE__ ),
) )
# TODO: test this.
__SCREAMING_SNAKE_CASE :Optional[int] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) )
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE :List[str] = seqlen + 2
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Union[str, Any] = self.num_layers
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :int = self.num_attention_heads
__SCREAMING_SNAKE_CASE :Tuple = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__SCREAMING_SNAKE_CASE :Tuple = common_inputs['''attention_mask'''].dtype
__SCREAMING_SNAKE_CASE :Union[str, Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ )] ,dim=1 )
__SCREAMING_SNAKE_CASE :str = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(SCREAMING_SNAKE_CASE__ )
]
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__SCREAMING_SNAKE_CASE :Optional[Any] = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[Any] = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=SCREAMING_SNAKE_CASE__ )
# Generate dummy inputs according to compute batch and sequence
__SCREAMING_SNAKE_CASE :List[Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__SCREAMING_SNAKE_CASE :str = dict(tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ) )
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = -1 ,SCREAMING_SNAKE_CASE__ = False ,SCREAMING_SNAKE_CASE__ = None ,) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :str = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
elif self.task == "causal-lm":
__SCREAMING_SNAKE_CASE :int = self._generate_dummy_inputs_for_causal_lm(
SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
else:
__SCREAMING_SNAKE_CASE :int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE__ ,batch_size=SCREAMING_SNAKE_CASE__ ,seq_length=SCREAMING_SNAKE_CASE__ ,is_pair=SCREAMING_SNAKE_CASE__ ,framework=SCREAMING_SNAKE_CASE__ )
return common_inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__SCREAMING_SNAKE_CASE :Dict = super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
else:
__SCREAMING_SNAKE_CASE :Dict = super(SCREAMING_SNAKE_CASE__ ,self )._flatten_past_key_values_(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) | 239 | 0 |
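
Most of the dummy-input plumbing above reduces to computing the 4-D shape of each cached key/value tensor for the ONNX exporter. A standalone sketch of that shape arithmetic, assuming BART-large-like numbers (all values illustrative):

import torch

batch, seq_len = 2, 8
num_heads, hidden = 16, 1024
head_dim = hidden // num_heads                      # 64
past_len = seq_len + 2                              # mirrors the seqlen + 2 padding above

# one (key, value) pair per decoder layer: (batch, heads, past_len, head_dim)
past_key_values = [
    (torch.zeros(batch, num_heads, past_len, head_dim),
     torch.zeros(batch, num_heads, past_len, head_dim))
    for _ in range(12)
]
# the attention mask must cover both the cached past and the new tokens
attention_mask = torch.ones(batch, seq_len + past_len, dtype=torch.long)
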
'''simple docstring'''
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )

    args = parser.parse_args()

    # text-to-image UnCLIP pipeline whose sub-models are reused below
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    # the image-variation pipeline replaces the text prior with a CLIP image encoder
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
| 83 |
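
Assuming the script above is saved as `convert_karlo_to_image_variation.py` (filename hypothetical), converting and then reloading the result would look like:

# python convert_karlo_to_image_variation.py --txt2img_unclip kakaobrain/karlo-v1-alpha --dump_path ./karlo-img-variation
from diffusers import UnCLIPImageVariationPipeline

pipe = UnCLIPImageVariationPipeline.from_pretrained("./karlo-img-variation")
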
"""simple docstring"""
from math import factorial
def _A ( lowercase = 1_00 ):
"""simple docstring"""
    return sum(int(x ) for x in str(factorial(lowercase ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip()))) | 81 | 0 |
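
As a quick sanity check on the function above: factorial(10) = 3628800, whose digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, and for the default n = 100 the digit sum is 648 (the well-known Project Euler 20 answer):

assert _A(10) == 27
assert _A() == 648
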
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , *__a , **__a ) -> None:
"""simple docstring"""
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __a , )
super().__init__(*__a , **__a ) | 368 |
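
The shim above is the standard deprecation pattern in transformers: keep the old name importable, warn at construction, and delegate everything to the replacement. A generic, library-free sketch of the same idea:

import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    """Deprecated alias; kept so existing imports keep working."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
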
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ (self , __a ) -> List[Any]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
UpperCAmelCase__ = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(__a )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sgugger/tiny-distilbert-classification'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , only_pretrain_model=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , [config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ = AutoConfig.from_pretrained(__a )
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a , configs=[config] )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__a , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , save_to_csv=__a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(__a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(__a , 'env.csv' ) , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
benchmark.run()
self.assertTrue(Path(os.path.join(__a , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(__a , 'env.csv' ) ).exists() )
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(__a ):
self.assertTrue(hasattr(__a , 'sequential' ) )
self.assertTrue(hasattr(__a , 'cumulative' ) )
self.assertTrue(hasattr(__a , 'current' ) )
self.assertTrue(hasattr(__a , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__a , 'log.txt' ) , log_print=__a , trace_memory_line_by_line=__a , eager_mode=__a , multi_process=__a , )
UpperCAmelCase__ = TensorFlowBenchmark(__a )
UpperCAmelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__a , 'log.txt' ) ).exists() )
| 335 | 0 |
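
Outside the test harness, the benchmark utilities exercised above are driven the same way: build the arguments, run, and read the result tables. A minimal sketch (model id and sizes taken from the tests; these utilities have since been deprecated upstream):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)
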
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class A__ ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : str , lowerCAmelCase__ : NestedDataStructureLike[PathLike] , lowerCAmelCase__ : Optional[NamedSplit] = None , lowerCAmelCase__ : Optional[Features] = None , lowerCAmelCase__ : str = None , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[int] = None , **lowerCAmelCase__ : List[str] , ) -> List[str]:
"""simple docstring"""
super().__init__(
lowerCAmelCase__ , split=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , num_proc=lowerCAmelCase__ , **lowerCAmelCase__ , )
_UpperCAmelCase : List[Any] = field
_UpperCAmelCase : List[str] = path_or_paths if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else {self.split: path_or_paths}
_UpperCAmelCase : List[str] = Json(
cache_dir=lowerCAmelCase__ , data_files=lowerCAmelCase__ , features=lowerCAmelCase__ , field=lowerCAmelCase__ , **lowerCAmelCase__ , )
def _lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
if self.streaming:
_UpperCAmelCase : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_UpperCAmelCase : str = None
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : str = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase__ , download_mode=lowerCAmelCase__ , verification_mode=lowerCAmelCase__ , base_path=lowerCAmelCase__ , num_proc=self.num_proc , )
_UpperCAmelCase : List[str] = self.builder.as_dataset(
split=self.split , verification_mode=lowerCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : Dataset , lowerCAmelCase__ : Union[PathLike, BinaryIO] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = None , **lowerCAmelCase__ : Tuple , ) -> Tuple:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_UpperCAmelCase : List[Any] = dataset
_UpperCAmelCase : Union[str, Any] = path_or_buf
_UpperCAmelCase : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_UpperCAmelCase : Tuple = num_proc
_UpperCAmelCase : int = "utf-8"
_UpperCAmelCase : List[Any] = to_json_kwargs
def _lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = self.to_json_kwargs.pop("path_or_buf" , lowerCAmelCase__ )
_UpperCAmelCase : Tuple = self.to_json_kwargs.pop("orient" , "records" )
_UpperCAmelCase : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
_UpperCAmelCase : List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
_UpperCAmelCase : List[str] = self.to_json_kwargs.pop("compression" , lowerCAmelCase__ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=lowerCAmelCase__ ) as buffer:
_UpperCAmelCase : Union[str, Any] = self._write(file_obj=lowerCAmelCase__ , orient=lowerCAmelCase__ , lines=lowerCAmelCase__ , index=lowerCAmelCase__ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
_UpperCAmelCase : Dict = self._write(
file_obj=self.path_or_buf , orient=lowerCAmelCase__ , lines=lowerCAmelCase__ , index=lowerCAmelCase__ , **self.to_json_kwargs )
return written
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Optional[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = args
_UpperCAmelCase : Optional[Any] = query_table(
table=self.dataset.data , key=slice(lowerCAmelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , )
_UpperCAmelCase : Dict = batch.to_pandas().to_json(
path_or_buf=lowerCAmelCase__ , orient=lowerCAmelCase__ , lines=lowerCAmelCase__ , index=lowerCAmelCase__ , **lowerCAmelCase__ )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : BinaryIO , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : Tuple , ) -> int:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
_UpperCAmelCase : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowerCAmelCase__ )
else:
_UpperCAmelCase , _UpperCAmelCase : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowerCAmelCase__ , lowerCAmelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(lowerCAmelCase__ )
        return written | 145 |
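
The reader/writer pair above is what backs `load_dataset("json", ...)` and `Dataset.to_json`; a compact round trip through the user-facing API (file path illustrative):

from datasets import Dataset, load_dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("out.jsonl", lines=True)  # records orient, one JSON object per line
reloaded = load_dataset("json", data_files="out.jsonl", split="train")
assert reloaded["text"] == ["a", "b"]
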
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = 1
_UpperCAmelCase : Tuple = 3
_UpperCAmelCase : Any = (3_2, 3_2)
_UpperCAmelCase : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=lowerCAmelCase__ , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
return CLIPTextModel(lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Any = self.dummy_cond_unet_upscale
_UpperCAmelCase : Union[str, Any] = DDPMScheduler()
_UpperCAmelCase : str = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : List[Any] = self.dummy_text_encoder
_UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : int = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : str = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Optional[int] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=lowerCAmelCase__ , )[0]
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_UpperCAmelCase : Optional[Any] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Optional[Any] = self.dummy_cond_unet_upscale
_UpperCAmelCase : Tuple = DDPMScheduler()
_UpperCAmelCase : Dict = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : str = self.dummy_vae
_UpperCAmelCase : Optional[Any] = self.dummy_text_encoder
_UpperCAmelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _UpperCAmelCase : List[str] = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
        # assemble the dummy upscale pipeline from the components above
_UpperCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : Any = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : int = output.images
assert image.shape[0] == 2
_UpperCAmelCase : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Any = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Any = self.dummy_cond_unet_upscale
_UpperCAmelCase : Any = DDPMScheduler()
_UpperCAmelCase : Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : List[Any] = self.dummy_text_encoder
_UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _UpperCAmelCase : Optional[int] = Image.fromarray(np.uint8(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
_UpperCAmelCase : Tuple = unet.half()
_UpperCAmelCase : Dict = text_encoder.half()
        # assemble the dummy upscale pipeline from the components above
_UpperCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Dict = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ).images
_UpperCAmelCase : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
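        # Note on the 4x factor asserted above: the upscale pipeline performs 4x
        # super-resolution, so the 64x64 conditioning image yields a 256x256 output.
        # Our reading of the dummy UNet config (in_channels=7 = 4 latent + 3 image
        # channels, i.e. the low-res image concatenated to the latent) is an
        # interpretation, not something the tests themselves assert.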
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_UpperCAmelCase : Tuple = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : str = StableDiffusionUpscalePipeline.from_pretrained(lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_UpperCAmelCase : Union[str, Any] = "a cat sitting on a park bench"
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
_UpperCAmelCase : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_UpperCAmelCase : Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
            lowerCAmelCase__ , torch_dtype=torch.float16 , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_UpperCAmelCase : Dict = "a cat sitting on a park bench"
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
_UpperCAmelCase : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : int = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : Any = StableDiffusionUpscalePipeline.from_pretrained(
            lowerCAmelCase__ , torch_dtype=torch.float16 , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase : Tuple = "a cat sitting on a park bench"
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , output_type="np" , )
_UpperCAmelCase : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9 | 145 | 1 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data( olid : str = "isbn/0140328726" ) ->dict:
    '''simple docstring'''
    new_olid = olid.strip().strip('''/''' )  # Remove leading/trailing whitespace & slashes
    if new_olid.count('''/''' ) != 1:
        msg = F"""{olid} is not a valid Open Library olid"""
        raise ValueError(msg )
    return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book( ol_book_data : dict ) ->dict:
    '''simple docstring'''
    desired_keys = {
        '''title''': '''Title''',
        '''publish_date''': '''Publish date''',
        '''authors''': '''Authors''',
        '''number_of_pages''': '''Number of pages:''',
        '''first_sentence''': '''First sentence''',
        '''isbn_10''': '''ISBN (10)''',
        '''isbn_13''': '''ISBN (13)''',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['''Authors'''] = [
        get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
    ]
    data['''First sentence'''] = data['''First sentence''']['''value''']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ''', '''.join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
    isbn = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 291 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem( AbstractFileSystem ):
    """simple docstring"""
    root_marker = """"""
    protocol = """hf-legacy"""  # "hf://" is reserved for hffs
    def __init__( self , repo_info = None , token = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ):
        '''simple docstring'''
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    '''name''': hf_file.rfilename,
                    '''size''': None,
                    '''type''': '''file''',
                }
                self.dir_cache.update(
                    {
                        str(d ): {'''name''': str(d ), '''size''': None, '''type''': '''directory'''}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase = "rb" , **__lowerCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __lowerCamelCase ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__A : Union[str, Any] = hf_hub_url(self.repo_info.id , __lowerCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__lowerCamelCase , mode=__lowerCamelCase , headers=get_authentication_headers_for_url(__lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
    def info( self , path , **kwargs ):
        '''simple docstring'''
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **kwargs ):
        '''simple docstring'''
        self._get_dirs()
        path = PurePosixPath(path.strip('''/''' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('''/''' ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['''name'''] for f in out )
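# Minimal usage sketch (hypothetical values: a real DatasetInfo from
# huggingface_hub would be needed, and registration with fsspec is assumed to
# happen elsewhere):
#
#   fs = HfFileSystem(repo_info=some_dataset_info, token=None)
#   fs.ls("")                      # top-level files and directories
#   with fs._open("train.csv") as f:
#       header = f.readline()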
| 291 | 1 |
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create the universe of discourse using np.linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, µA(x) - µB(x)]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
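    # Worked micro-example (values assumed for illustration): at a point x where
    # µ_young(x) = 0.3 and µ_middle_aged(x) = 0.6:
    #   union        = max(0.3, 0.6)           = 0.6
    #   intersection = min(0.3, 0.6)           = 0.3
    #   complement_a = 1 - 0.3                 = 0.7
    #   alg_sum      = 0.3 + 0.6 - (0.3 * 0.6) = 0.72
    #   bdd_sum      = min(1, 0.3 + 0.6)       = 0.9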
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show() | 96 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job ):
    """simple docstring"""
    job_info = {}
    start = job['''started_at''']
    end = job['''completed_at''']
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info['''started_at'''] = start
    job_info['''completed_at'''] = end
    job_info['''duration'''] = duration_in_min
    return job_info
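# Shape sketch of the dict returned above (the timestamps are illustrative
# assumptions in GitHub's ISO-8601 format, not real CI data):
# {"started_at": "2023-01-01T00:00:00Z",
#  "completed_at": "2023-01-01T00:03:00Z",
#  "duration": 3}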
def get_job_time(workflow_run_id , token=None ):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''}
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' , headers=headers ).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        return job_time
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v['duration']}''')
| 127 | 0 |
"""simple docstring"""
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class snake_case ( UpperCAmelCase ):
__magic_name__ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__magic_name__ = '''nezha'''
def __init__( self : Dict , A : List[Any]=2_1_1_2_8 , A : List[Any]=7_6_8 , A : str=1_2 , A : Optional[Any]=1_2 , A : Dict=3_0_7_2 , A : Any="gelu" , A : List[str]=0.1 , A : int=0.1 , A : Optional[int]=5_1_2 , A : Optional[int]=6_4 , A : List[Any]=2 , A : Optional[Any]=0.02 , A : Optional[Any]=1E-12 , A : str=0.1 , A : Union[str, Any]=0 , A : str=2 , A : Tuple=3 , A : str=True , **A : Optional[int] , ):
'''simple docstring'''
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
a : Optional[Any] = vocab_size
a : Tuple = hidden_size
a : Tuple = num_hidden_layers
a : List[Any] = num_attention_heads
a : Any = hidden_act
a : Optional[Any] = intermediate_size
a : str = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Optional[Any] = max_position_embeddings
a : Dict = max_relative_position
a : Dict = type_vocab_size
a : Tuple = initializer_range
a : List[str] = layer_norm_eps
a : List[str] = classifier_dropout
a : str = use_cache
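# Usage sketch (the defaults come from the __init__ signature above; the class
# name `snake_case` is the one defined in this file):
#   config = snake_case()
#   assert config.hidden_size == 768 and config.max_relative_position == 64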
| 186 |
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
    parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
    parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
    parser.add_argument(
        '--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
    parser.add_argument(
        '--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
    parser.add_argument(
        '--na-prob-thresh' , '-t' , type=float , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
    parser.add_argument(
        '--out-image-dir' , '-p' , metavar='out_images' , default=None , help='Save precision-recall curves to directory.' )
    parser.add_argument('--verbose' , '-v' , action='store_true' )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans(dataset ):
    '''simple docstring'''
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa['''id''']] = bool(qa['''answers''']['''text'''] )
    return qid_to_has_ans
def normalize_answer(text ):
    '''simple docstring'''
    def remove_articles(text ):
        return ARTICLES_REGEX.sub(' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
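# Deterministic worked example for the normalizer above:
# normalize_answer("The  Empire State Building!") -> "empire state building"
# (article dropped, punctuation removed, lowercased, whitespace collapsed).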
def get_tokens(s ):
    '''simple docstring'''
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact(a_gold , a_pred ):
    '''simple docstring'''
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa(a_gold , a_pred ):
    '''simple docstring'''
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
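# Worked example (assumed strings): gold "a cat sat" vs. prediction
# "the cat sat on". After normalization the token lists are ["cat", "sat"] and
# ["cat", "sat", "on"], so num_same = 2, precision = 2/3, recall = 2/2 = 1,
# and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.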
def get_raw_scores(dataset , preds ):
    '''simple docstring'''
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa['''id''']
                gold_answers = [t for t in qa['''answers''']['''text'''] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = ['''''']
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores , na_probs , qid_to_has_ans , na_prob_thresh ):
    '''simple docstring'''
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores , fa_scores , qid_list=None ):
    '''simple docstring'''
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores.values() ) / total),
                ('f1', 100.0 * sum(fa_scores.values() ) / total),
                ('total', total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ('total', total),
            ] )
def merge_eval(main_eval , new_eval , prefix ):
    '''simple docstring'''
    for k in new_eval:
        main_eval[F'''{prefix}_{k}'''] = new_eval[k]
def plot_pr_curve(precisions , recalls , out_image , title ):
    '''simple docstring'''
    plt.step(recalls , precisions , color='b' , alpha=0.2 , where='post' )
    plt.fill_between(recalls , precisions , step='post' , alpha=0.2 , color='b' )
    plt.xlabel('Recall' )
    plt.ylabel('Precision' )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval(scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None ):
    '''simple docstring'''
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , out_image_dir ):
    '''simple docstring'''
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
    pr_fa = make_precision_recall_eval(
        fa_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
    merge_eval(main_eval , pr_exact , 'pr_exact' )
    merge_eval(main_eval , pr_fa , 'pr_f1' )
    merge_eval(main_eval , pr_oracle , 'pr_oracle' )
def histogram_na_prob(na_probs , qid_list , image_dir , name ):
    '''simple docstring'''
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=2_0 , range=(0.0, 1.0) )
    plt.xlabel('Model probability of no-answer' )
    plt.ylabel('Proportion of dataset' )
    plt.title(f'''Histogram of no-answer probability: {name}''' )
    plt.savefig(os.path.join(image_dir , f'''na_prob_hist_{name}.png''' ) )
    plt.clf()
def find_best_thresh(preds , scores , na_probs , qid_to_has_ans ):
    '''simple docstring'''
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh
def find_all_best_thresh(main_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans ):
    '''simple docstring'''
    best_exact, exact_thresh = find_best_thresh(preds , exact_raw , na_probs , qid_to_has_ans )
    best_fa, fa_thresh = find_best_thresh(preds , fa_raw , na_probs , qid_to_has_ans )
    main_eval['''best_exact'''] = best_exact
    main_eval['''best_exact_thresh'''] = exact_thresh
    main_eval['''best_f1'''] = best_fa
    main_eval['''best_f1_thresh'''] = fa_thresh
def main():
    '''simple docstring'''
    with open(OPTS.data_file ) as f:
        dataset_json = json.load(f )
        dataset = dataset_json['data']
    with open(OPTS.pred_file ) as f:
        preds = json.load(f )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            na_probs = json.load(f )
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset )  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset , preds )
    exact_thresh = apply_no_ans_threshold(exact_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    fa_thresh = apply_no_ans_threshold(fa_raw , na_probs , qid_to_has_ans , OPTS.na_prob_thresh )
    out_eval = make_eval_dict(exact_thresh , fa_thresh )
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=has_ans_qids )
        merge_eval(out_eval , has_ans_eval , 'HasAns' )
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh , fa_thresh , qid_list=no_ans_qids )
        merge_eval(out_eval , no_ans_eval , 'NoAns' )
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , OPTS.out_image_dir )
        histogram_na_prob(na_probs , has_ans_qids , OPTS.out_image_dir , 'hasAns' )
        histogram_na_prob(na_probs , no_ans_qids , OPTS.out_image_dir , 'noAns' )
    if OPTS.out_file:
        with open(OPTS.out_file , 'w' ) as f:
            json.dump(out_eval , f )
    else:
        print(json.dumps(out_eval , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 186 | 1 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__a = Mapping[str, np.ndarray]
__a = Mapping[str, Any] # Is a nested dict.
__a = 0.0_1
@dataclasses.dataclass(frozen=UpperCAmelCase_ )
class A__ :
"""simple docstring"""
UpperCamelCase_ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
UpperCamelCase_ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
UpperCamelCase_ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
UpperCamelCase_ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
UpperCamelCase_ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
UpperCamelCase_ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
UpperCamelCase_ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
UpperCamelCase_ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
UpperCamelCase_ : Optional[Sequence[int]] = None
def __UpperCAmelCase ( a_: Dict ):
_UpperCAmelCase : Optional[int] = R'''(\[[A-Z]+\]\n)'''
_UpperCAmelCase : List[str] = [tag.strip() for tag in re.split(a_, a_ ) if len(a_ ) > 0]
_UpperCAmelCase : Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n" ) for l in tags[1::2]] )
_UpperCAmelCase : List[str] = ["N", "CA", "C"]
_UpperCAmelCase : int = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : Dict = None
for g in groups:
if "[PRIMARY]" == g[0]:
_UpperCAmelCase : Any = g[1][0].strip()
for i in range(len(a_ ) ):
if seq[i] not in residue_constants.restypes:
_UpperCAmelCase : List[Any] = '''X''' # FIXME: strings are immutable
_UpperCAmelCase : Dict = np.array(
[residue_constants.restype_order.get(a_, residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
_UpperCAmelCase : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(a_, g[1][axis].split() ) ) )
_UpperCAmelCase : Optional[Any] = np.array(a_ )
            _UpperCAmelCase : Any = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
for i, atom in enumerate(a_ ):
_UpperCAmelCase : Union[str, Any] = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
_UpperCAmelCase : List[str] = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip() ) ) )
_UpperCAmelCase : Union[str, Any] = np.zeros(
(
len(a_ ),
residue_constants.atom_type_num,
                ) ).astype(np.float32 )
for i, atom in enumerate(a_ ):
_UpperCAmelCase : List[Any] = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=a_, atom_mask=a_, aatype=a_, residue_index=np.arange(len(a_ ) ), b_factors=a_, )
def __UpperCAmelCase ( a_: Dict, a_: int = 0 ):
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Optional[int] = prot.remark
if remark is not None:
pdb_headers.append(f"""REMARK {remark}""" )
_UpperCAmelCase : Optional[int] = prot.parents
_UpperCAmelCase : Union[str, Any] = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_UpperCAmelCase : List[Any] = [p for i, p in zip(a_, a_ ) if i == chain_id]
if parents is None or len(a_ ) == 0:
_UpperCAmelCase : Tuple = ['''N/A''']
pdb_headers.append(f"""PARENT {' '.join(a_ )}""" )
return pdb_headers
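# Example of the list built above (remark and parents are hypothetical): a
# Protein with remark "relaxed model" and parents ["1abc_A", "2xyz_B"] yields
# ["REMARK relaxed model", "PARENT 1abc_A 2xyz_B"], one PDB header per element.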
def __UpperCAmelCase ( a_: Optional[Any], a_: Optional[int] ):
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = pdb_str.split("\n" )
_UpperCAmelCase : int = prot.remark
if remark is not None:
out_pdb_lines.append(f"""REMARK {remark}""" )
_UpperCAmelCase : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
_UpperCAmelCase : Dict = []
if prot.parents_chain_index is not None:
_UpperCAmelCase : Dict[str, List[str]] = {}
for p, i in zip(prot.parents, prot.parents_chain_index ):
parent_dict.setdefault(str(a_ ), [] )
parent_dict[str(a_ )].append(a_ )
_UpperCAmelCase : Union[str, Any] = max([int(a_ ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
_UpperCAmelCase : Union[str, Any] = parent_dict.get(str(a_ ), ["N/A"] )
parents_per_chain.append(a_ )
else:
parents_per_chain.append(list(prot.parents ) )
else:
_UpperCAmelCase : List[Any] = [['''N/A''']]
def make_parent_line(a_: List[Any] ) -> str:
return f"""PARENT {' '.join(a_ )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
_UpperCAmelCase : Optional[Any] = 0
for i, l in enumerate(a_ ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(a_ )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(a_ ):
_UpperCAmelCase : List[str] = parents_per_chain[chain_counter]
else:
_UpperCAmelCase : Optional[int] = ['''N/A''']
out_pdb_lines.append(make_parent_line(a_ ) )
return "\n".join(a_ )
def __UpperCAmelCase ( a_: str ):
_UpperCAmelCase : Tuple = residue_constants.restypes + ['''X''']
    def res_atoa(r: int ) -> str:
        return residue_constants.restype_1to3.get(restypes[r] , "UNK" )
_UpperCAmelCase : Optional[int] = residue_constants.atom_types
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : List[Any] = prot.atom_mask
_UpperCAmelCase : List[str] = prot.aatype
_UpperCAmelCase : Union[str, Any] = prot.atom_positions
_UpperCAmelCase : Any = prot.residue_index.astype(np.intaa )
_UpperCAmelCase : List[str] = prot.b_factors
_UpperCAmelCase : List[str] = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
_UpperCAmelCase : int = get_pdb_headers(a_ )
if len(a_ ) > 0:
pdb_lines.extend(a_ )
_UpperCAmelCase : str = aatype.shape[0]
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : Any = string.ascii_uppercase
_UpperCAmelCase : Any = None
# Add all atom sites.
for i in range(a_ ):
_UpperCAmelCase : List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(a_, atom_positions[i], atom_mask[i], b_factors[i] ):
if mask < 0.5:
continue
_UpperCAmelCase : str = '''ATOM'''
_UpperCAmelCase : Tuple = atom_name if len(a_ ) == 4 else f""" {atom_name}"""
_UpperCAmelCase : Optional[Any] = ''''''
_UpperCAmelCase : int = ''''''
_UpperCAmelCase : int = 1.00
_UpperCAmelCase : int = atom_name[0] # Protein supports only C, N, O, S, this works.
_UpperCAmelCase : int = ''''''
_UpperCAmelCase : Dict = '''A'''
if chain_index is not None:
_UpperCAmelCase : Union[str, Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
_UpperCAmelCase : str = (
f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
f"""{res_name_a:>3} {chain_tag:>1}"""
f"""{residue_index[i]:>4}{insertion_code:>1} """
f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
f"""{occupancy:>6.2f}{b_factor:>6.2f} """
f"""{element:>2}{charge:>2}"""
)
pdb_lines.append(a_ )
atom_index += 1
_UpperCAmelCase : Dict = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
_UpperCAmelCase : int = True
_UpperCAmelCase : List[str] = chain_index[i + 1]
if should_terminate:
# Close the chain.
_UpperCAmelCase : Optional[Any] = '''TER'''
_UpperCAmelCase : Optional[int] = (
f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(a_ )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(a_, a_ ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(a_ )
def __UpperCAmelCase ( a_: Optional[Any] ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def __UpperCAmelCase ( a_: List[Any], a_: List[Any], a_: Union[str, Any] = None, a_: List[str] = None, a_: str = None, a_: Optional[int] = None, a_: int = None, ):
return Protein(
aatype=features["aatype"], atom_positions=result["final_atom_positions"], atom_mask=result["final_atom_mask"], residue_index=features["residue_index"] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ), chain_index=a_, remark=a_, parents=a_, parents_chain_index=a_, ) | 145 | import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : int = VOCAB_FILES_NAMES
a__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
a__ : List[str] = ["""input_ids""", """attention_mask"""]
a__ : int = None
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase=False , __lowercase=False , **__lowercase , ) -> List[str]:
super().__init__(
__lowercase , __lowercase , tokenizer_file=__lowercase , unk_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , add_prefix_space=__lowercase , clean_up_tokenization_spaces=__lowercase , **__lowercase , )
__UpperCamelCase :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , __lowercase) != add_prefix_space:
__UpperCamelCase :Any = getattr(__lowercase , pre_tok_state.pop('''type'''))
__UpperCamelCase :str = add_prefix_space
__UpperCamelCase :List[str] = pre_tok_class(**__lowercase)
__UpperCamelCase :Tuple = add_prefix_space
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
__UpperCamelCase :Tuple = kwargs.get('''is_split_into_words''' , __lowercase)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''')
return super()._batch_encode_plus(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , *__lowercase , **__lowercase) -> BatchEncoding:
__UpperCamelCase :List[str] = kwargs.get('''is_split_into_words''' , __lowercase)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
''' pretokenized inputs.''')
return super()._encode_plus(*__lowercase , **__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase = None) -> Tuple[str]:
__UpperCamelCase :Optional[Any] = self._tokenizer.model.save(__lowercase , name=__lowercase)
return tuple(__lowercase)
def UpperCamelCase__ ( self , __lowercase) -> List[int]:
__UpperCamelCase :str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__lowercase , add_special_tokens=__lowercase) + [self.eos_token_id])
if len(__lowercase) > self.model_max_length:
__UpperCamelCase :Any = input_ids[-self.model_max_length :]
return input_ids
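# Usage sketch (assumes the hub checkpoint is reachable; `lowerCamelCase_` is
# the tokenizer class defined above):
#   tok = lowerCamelCase_.from_pretrained("bigscience/tokenizer")
#   ids = tok("Hello world")["input_ids"]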
| 43 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : int = '''fnet'''
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[str]=3_2000 , lowerCAmelCase__ : Union[str, Any]=768 , lowerCAmelCase__ : Optional[Any]=12 , lowerCAmelCase__ : Tuple=3072 , lowerCAmelCase__ : Any="gelu_new" , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Optional[Any]=512 , lowerCAmelCase__ : int=4 , lowerCAmelCase__ : List[Any]=0.02 , lowerCAmelCase__ : Union[str, Any]=1E-12 , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : Any=512 , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : int=1 , lowerCAmelCase__ : List[Any]=2 , **lowerCAmelCase__ : Optional[int] , ):
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = vocab_size
SCREAMING_SNAKE_CASE_: Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_: Tuple = hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_: Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE_: Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: int = initializer_range
SCREAMING_SNAKE_CASE_: Optional[int] = type_vocab_size
SCREAMING_SNAKE_CASE_: int = layer_norm_eps
SCREAMING_SNAKE_CASE_: Union[str, Any] = use_tpu_fourier_optimizations
SCREAMING_SNAKE_CASE_: int = tpu_short_seq_length
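# Round-trip sketch (PretrainedConfig provides to_dict(); the asserted value is
# the default declared in the signature above, and `__lowercase` is the class
# name used in this file):
#   config = __lowercase()
#   assert config.to_dict()["tpu_short_seq_length"] == 512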
| 127 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowercase :
"""simple docstring"""
def __init__( self : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict=14 , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[int]=99 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Dict=37 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Dict=512 , lowerCAmelCase__ : Dict=0.02 , ):
SCREAMING_SNAKE_CASE_: List[Any] = parent
SCREAMING_SNAKE_CASE_: Any = batch_size
SCREAMING_SNAKE_CASE_: str = seq_length
SCREAMING_SNAKE_CASE_: Dict = is_training
SCREAMING_SNAKE_CASE_: str = use_input_mask
SCREAMING_SNAKE_CASE_: int = use_token_type_ids
SCREAMING_SNAKE_CASE_: Tuple = use_labels
SCREAMING_SNAKE_CASE_: int = vocab_size
SCREAMING_SNAKE_CASE_: Tuple = hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] = rotary_dim
SCREAMING_SNAKE_CASE_: Dict = num_hidden_layers
SCREAMING_SNAKE_CASE_: List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_: Dict = intermediate_size
SCREAMING_SNAKE_CASE_: List[str] = hidden_act
SCREAMING_SNAKE_CASE_: List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_: int = initializer_range
SCREAMING_SNAKE_CASE_: List[Any] = None
SCREAMING_SNAKE_CASE_: Optional[Any] = vocab_size - 1
SCREAMING_SNAKE_CASE_: Tuple = vocab_size - 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = vocab_size - 1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_: Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_: Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
SCREAMING_SNAKE_CASE_: Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
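        # Shape sketch for the tuple returned above (using the tester defaults
        # batch_size=14, seq_length=7, vocab_size=99): input_ids is a (14, 7)
        # integer array with values in [0, 99); input_mask, when enabled, is a
        # (14, 7) 0/1 attention mask.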
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = config_and_inputs
SCREAMING_SNAKE_CASE_: Union[str, Any] = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = 20
SCREAMING_SNAKE_CASE_: Any = model_class_name(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.init_cache(input_ids.shape[0] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4")
SCREAMING_SNAKE_CASE_: List[str] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
SCREAMING_SNAKE_CASE_: int = model(
input_ids[:, :-1] , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4")
SCREAMING_SNAKE_CASE_: Dict = model(
input_ids[:, -1:] , attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[Any] = model(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}")
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: int = 20
SCREAMING_SNAKE_CASE_: Tuple = model_class_name(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
SCREAMING_SNAKE_CASE_: List[str] = model.init_cache(input_ids.shape[0] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
SCREAMING_SNAKE_CASE_: Any = model(
input_ids[:, :-1] , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4")
SCREAMING_SNAKE_CASE_: Optional[int] = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}")
@require_flax
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Dict = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
_UpperCAmelCase : Tuple = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Tuple = FlaxGPTJModelTester(self)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
@tooslow
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left")
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
SCREAMING_SNAKE_CASE_: int = False
SCREAMING_SNAKE_CASE_: str = model.config.eos_token_id
SCREAMING_SNAKE_CASE_: int = jax.jit(model.generate)
SCREAMING_SNAKE_CASE_: Optional[int] = jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id).sequences
SCREAMING_SNAKE_CASE_: int = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = [
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
SCREAMING_SNAKE_CASE_: Tuple = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE_: Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_: int = getattr(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = pt_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_: List[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: str = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 0
SCREAMING_SNAKE_CASE_: Optional[Any] = 1
SCREAMING_SNAKE_CASE_: Optional[Any] = pt_model_class(lowerCAmelCase__).eval()
                SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__ , dtype=jnp.float32)
SCREAMING_SNAKE_CASE_: Optional[int] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = pt_model(**lowerCAmelCase__).to_tuple()
SCREAMING_SNAKE_CASE_: List[str] = fx_model(**lowerCAmelCase__).to_tuple()
self.assertEqual(len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = model_class.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = fx_model_loaded(**lowerCAmelCase__).to_tuple()
self.assertEqual(
len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
SCREAMING_SNAKE_CASE_: Optional[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE_: Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_: Any = getattr(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = pt_model_class(lowerCAmelCase__).eval()
                SCREAMING_SNAKE_CASE_: Optional[Any] = model_class(lowerCAmelCase__ , dtype=jnp.float32)
SCREAMING_SNAKE_CASE_: int = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = pt_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_: Any = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = 0
SCREAMING_SNAKE_CASE_: Tuple = 1
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: str = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = pt_model(**lowerCAmelCase__).to_tuple()
SCREAMING_SNAKE_CASE_: Optional[Any] = fx_model(**lowerCAmelCase__).to_tuple()
self.assertEqual(len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = pt_model_class.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__)
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Any = pt_model_loaded(**lowerCAmelCase__).to_tuple()
self.assertEqual(
len(lowerCAmelCase__) , len(lowerCAmelCase__) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(lowerCAmelCase__ , lowerCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2)
@tooslow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Union[str, Any] = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
SCREAMING_SNAKE_CASE_: str = model(np.ones((1, 1)))
self.assertIsNotNone(lowerCAmelCase__)
| 127 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCAmelCase_ ( unittest.TestCase):
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : Optional[Any] = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
lowercase__ : Dict = dict(zip(a , range(len(a ) ) ) )
lowercase__ : Union[str, Any] = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
lowercase__ : List[Any] = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6_0_0_0,
'return_attention_mask': False,
'do_normalize': True,
}
lowercase__ : Dict = tempfile.mkdtemp()
lowercase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowercase__ : List[Any] = os.path.join(self.tmpdirname , a )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(a ) + '\n' )
# load decoder from hub
lowercase__ : Union[str, Any] = 'hf-internal-testing/ngram-beam-search-decoder'
def _UpperCAmelCase ( self , **a ) -> Any:
lowercase__ : Union[str, Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **a )
def _UpperCAmelCase ( self , **a ) -> Union[str, Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **a )
def _UpperCAmelCase ( self , **a ) -> Optional[Any]:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **a )
def _UpperCAmelCase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_feature_extractor()
lowercase__ : Any = self.get_decoder()
lowercase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=a , feature_extractor=a , decoder=a )
processor.save_pretrained(self.tmpdirname )
lowercase__ : List[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , a )
def _UpperCAmelCase ( self ) -> List[str]:
lowercase__ : Dict = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
        # load with additional decoder kwargs; the assertions below check that they are set on the language model
lowercase__ : Dict = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(a , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : int = self.get_feature_extractor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Optional[Any] = self.get_decoder()
lowercase__ : Any = WavaVecaProcessorWithLM(tokenizer=a , feature_extractor=a , decoder=a )
lowercase__ : Optional[int] = floats_list((3, 1_0_0_0) )
lowercase__ : List[Any] = feature_extractor(a , return_tensors='np' )
lowercase__ : Any = processor(a , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : Any = self.get_feature_extractor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : List[str] = self.get_decoder()
lowercase__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=a , feature_extractor=a , decoder=a )
lowercase__ : str = 'This is a test string'
lowercase__ : Any = processor(text=a )
lowercase__ : List[str] = tokenizer(a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits( self , shape=(2, 1_0, 1_6) , seed=7_7 ):
        np.random.seed(seed )
        return np.random.rand(*shape )
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : List[str] = self.get_feature_extractor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Any = self.get_decoder()
lowercase__ : str = WavaVecaProcessorWithLM(tokenizer=a , feature_extractor=a , decoder=a )
lowercase__ : List[Any] = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
lowercase__ : Dict = processor.decode(a )
lowercase__ : Optional[Any] = decoder.decode_beams(a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def _UpperCAmelCase ( self , a ) -> Any:
lowercase__ : Any = self.get_feature_extractor()
lowercase__ : int = self.get_tokenizer()
lowercase__ : Optional[Any] = self.get_decoder()
lowercase__ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=a , feature_extractor=a , decoder=a )
lowercase__ : List[str] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowercase__ : List[Any] = processor.batch_decode(a )
else:
with get_context(a ).Pool() as pool:
lowercase__ : int = processor.batch_decode(a , a )
lowercase__ : Tuple = list(a )
with get_context('fork' ).Pool() as p:
lowercase__ : str = decoder.decode_beams_batch(a , a )
lowercase__ , lowercase__ , lowercase__ : Optional[int] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(a , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(a , decoded_processor.logit_score )
self.assertListEqual(a , decoded_processor.lm_score )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = self.get_feature_extractor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Any = self.get_decoder()
lowercase__ : List[str] = WavaVecaProcessorWithLM(tokenizer=a , feature_extractor=a , decoder=a )
lowercase__ : List[str] = self._get_dummy_logits()
lowercase__ : int = 1_5
lowercase__ : Tuple = -20.0
lowercase__ : Optional[Any] = -4.0
lowercase__ : Dict = processor.batch_decode(
a , beam_width=a , beam_prune_logp=a , token_min_logp=a , )
lowercase__ : Dict = decoded_processor_out.text
lowercase__ : Union[str, Any] = list(a )
with get_context('fork' ).Pool() as pool:
lowercase__ : Union[str, Any] = decoder.decode_beams_batch(
a , a , beam_width=a , beam_prune_logp=a , token_min_logp=a , )
lowercase__ : List[Any] = [d[0][0] for d in decoded_decoder_out]
lowercase__ : List[Any] = [d[0][2] for d in decoded_decoder_out]
lowercase__ : Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(a , a )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , a )
self.assertTrue(np.array_equal(a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , a , atol=1e-3 ) )
self.assertTrue(np.array_equal(a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , a , atol=1e-3 ) )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : List[Any] = self.get_feature_extractor()
lowercase__ : str = self.get_tokenizer()
lowercase__ : int = self.get_decoder()
lowercase__ : str = WavaVecaProcessorWithLM(tokenizer=a , feature_extractor=a , decoder=a )
lowercase__ : Dict = self._get_dummy_logits()
lowercase__ : Optional[int] = 2.0
lowercase__ : Dict = 5.0
lowercase__ : List[Any] = -20.0
lowercase__ : Optional[Any] = True
lowercase__ : Optional[int] = processor.batch_decode(
a , alpha=a , beta=a , unk_score_offset=a , lm_score_boundary=a , )
lowercase__ : Optional[Any] = decoded_processor_out.text
lowercase__ : Union[str, Any] = list(a )
decoder.reset_params(
alpha=a , beta=a , unk_score_offset=a , lm_score_boundary=a , )
with get_context('fork' ).Pool() as pool:
lowercase__ : int = decoder.decode_beams_batch(
a , a , )
lowercase__ : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(a , a )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , a )
lowercase__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , a )
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowercase__ : int = processor.decoder.model_container[processor.decoder._model_key]
lowercase__ : List[str] = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
lowercase__ : List[str] = os.listdir(a )
lowercase__ : Dict = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Optional[int] = snapshot_download('hf-internal-testing/processor_with_lm' )
lowercase__ : str = WavaVecaProcessorWithLM.from_pretrained(a )
lowercase__ : int = processor.decoder.model_container[processor.decoder._model_key]
lowercase__ : Union[str, Any] = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
lowercase__ : Union[str, Any] = os.listdir(a )
lowercase__ : Dict = os.listdir(a )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder files from the hub and the local files in the cache are the same
self.assertListEqual(a , a )
def _UpperCAmelCase ( self ) -> Optional[int]:
lowercase__ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowercase__ : Any = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
lowercase__ : Dict = floats_list((3, 1_0_0_0) )
lowercase__ : Any = processor_wavaveca(a , return_tensors='np' )
lowercase__ : Optional[Any] = processor_auto(a , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
lowercase__ : Dict = self._get_dummy_logits()
lowercase__ : Optional[Any] = processor_wavaveca.batch_decode(a )
lowercase__ : int = processor_auto.batch_decode(a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : int = self.get_feature_extractor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : List[Any] = self.get_decoder()
lowercase__ : Any = WavaVecaProcessorWithLM(tokenizer=a , feature_extractor=a , decoder=a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
    @staticmethod
    def get_from_offsets( offsets , key ):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _UpperCAmelCase ( self ) -> Any:
lowercase__ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowercase__ : Optional[int] = self._get_dummy_logits()[0]
lowercase__ : Any = processor.decode(a , output_word_offsets=a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(a , a ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def _UpperCAmelCase ( self ) -> int:
lowercase__ : Dict = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
lowercase__ : Dict = self._get_dummy_logits()
lowercase__ : Dict = processor.batch_decode(a , output_word_offsets=a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(a , a ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(a , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase ( self ) -> str:
import torch
lowercase__ : Dict = load_dataset('common_voice' , 'en' , split='train' , streaming=a )
lowercase__ : Optional[Any] = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
lowercase__ : str = iter(a )
lowercase__ : str = next(a )
lowercase__ : Optional[int] = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
lowercase__ : Any = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowercase__ : List[Any] = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
lowercase__ : int = model(a ).logits.cpu().numpy()
lowercase__ : List[str] = processor.decode(logits[0] , output_word_offsets=a )
lowercase__ : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowercase__ : Tuple = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
lowercase__ : List[Any] = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(a , 'word' ) ) , a )
self.assertEqual(' '.join(self.get_from_offsets(a , 'word' ) ) , output.text )
# output times
lowercase__ : Any = torch.tensor(self.get_from_offsets(a , 'start_time' ) )
lowercase__ : Dict = torch.tensor(self.get_from_offsets(a , 'end_time' ) )
# fmt: off
lowercase__ : Tuple = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
lowercase__ : int = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(a , a , atol=0.01 ) )
self.assertTrue(torch.allclose(a , a , atol=0.01 ) )
| 77 |
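The batch-decoding tests above hinge on pool creation order: the pool must be created after the processor so that forked workers inherit the already-loaded language model. A minimal sketch of that pattern, assuming `processor` is a Wav2Vec2-style processor whose `batch_decode` accepts a pool as its second argument, as in the tests:

from multiprocessing import get_context
def batch_decode_with_pool(processor, logits, context="fork"):
    # create the pool *after* the processor so sub-processes see the LM
    with get_context(context).Pool() as pool:
        return processor.batch_decode(logits, pool)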
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """xlnet"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""", # Backward compatibility
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=3_2000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' , FutureWarning , )
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ):
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 195 | 0 |
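The `attribute_map` at the top of the config redirects legacy attribute names to their current counterparts through `PretrainedConfig`'s attribute lookup. A short sketch of that aliasing, assuming the class above behaves like transformers' stock XLNetConfig:

config = XLNetConfig(vocab_size=32000, d_model=1024)
# legacy names resolve through attribute_map
assert config.hidden_size == config.d_model == 1024
assert config.n_token == config.vocab_size == 32000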
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset ( datasets.BeamBasedBuilder ):
    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )
    def _split_generators( self , dl_manager , pipeline ):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
    def _build_pcollection( self , pipeline , examples ):
        '''simple docstring'''
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset ( datasets.BeamBasedBuilder ):
    def _info( self ):
        '''simple docstring'''
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )
    def _split_generators( self , dl_manager , pipeline ):
        '''simple docstring'''
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
        ]
    def _build_pcollection( self , pipeline , examples ):
        '''simple docstring'''
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class BeamBuilderTest ( TestCase ):
@require_beam
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase : Tuple = DummyBeamDataset(cache_dir=A_ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A_ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
_UpperCAmelCase : int = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , A_ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , A_ )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A_ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def _UpperCAmelCase ( self ):
'''simple docstring'''
import apache_beam as beam
_UpperCAmelCase : Optional[int] = beam.io.parquetio.WriteToParquet
_UpperCAmelCase : Optional[int] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase : List[str] = DummyBeamDataset(cache_dir=A_ , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
_UpperCAmelCase : Tuple = partial(A_ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
A_ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
A_ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
_UpperCAmelCase : Dict = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , A_ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , A_ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(A_ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def _UpperCAmelCase ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase : int = DummyBeamDataset(cache_dir=A_ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
_UpperCAmelCase : Optional[int] = NestedBeamDataset(cache_dir=A_ , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(A_ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
_UpperCAmelCase : List[str] = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , A_ )
self.assertEqual(dset["train"].info.splits["train"].num_examples , A_ )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(A_ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 189 |
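For reference, the example generators above yield (key, example) tuples, which is the structure the Beam pipelines load via `beam.Create`:

print(get_test_dummy_examples())
# [(0, {'content': 'foo'}), (1, {'content': 'bar'}), (2, {'content': 'foobar'})]
print(get_test_nested_examples())
# [(0, {'a': {'b': ['foo']}}), (1, {'a': {'b': ['bar']}}), (2, {'a': {'b': ['foobar']}})]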
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class a ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        '''simple docstring'''
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.get_image_processor()
_UpperCAmelCase : Optional[int] = self.get_feature_extractor()
_UpperCAmelCase : Tuple = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : str = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , A_ )
self.assertIsInstance(processor.image_processor , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : Tuple = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
_UpperCAmelCase : List[str] = np.ones([12000] )
_UpperCAmelCase : int = feature_extractor(A_ , return_tensors="np" )
_UpperCAmelCase : int = processor(audio=A_ , return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : Optional[Any] = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
_UpperCAmelCase : Union[str, Any] = np.ones([3, 224, 224] )
_UpperCAmelCase : Tuple = image_processor(A_ , return_tensors="np" )
_UpperCAmelCase : List[str] = processor(images=A_ , return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Any = self.get_feature_extractor()
_UpperCAmelCase : Dict = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
_UpperCAmelCase : str = np.ones([12000] )
_UpperCAmelCase : Optional[Any] = np.ones([3, 224, 224] )
_UpperCAmelCase : List[Any] = processor(audio=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_feature_extractor()
_UpperCAmelCase : str = TvltProcessor(image_processor=A_ , feature_extractor=A_ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
| 189 | 1 |
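A hypothetical end-to-end usage sketch of the processor exercised above; the checkpoint name is taken from the tests, and downloading it requires network access:

import numpy as np
from transformers import TvltProcessor
processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
audio = np.ones([12000])          # mono waveform, as in the tests
images = np.ones([3, 224, 224])   # single CHW image
inputs = processor(audio=audio, images=images)
print(list(inputs.keys()))  # ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'] per the test above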
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ):
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = VivitImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
a :str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
a :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
a :Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
a :List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a :List[str] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
a :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a :Any = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
a :List[Any] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a :str = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE__ ( self ):
# Initialize image_processing
a :int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a :Dict = prepare_video_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
a :Optional[int] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a :Any = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 94 |
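All three input paths above (PIL, NumPy, PyTorch) assert the same output layout. Spelled out with the tester defaults:

# (batch_size, num_frames, num_channels, crop_height, crop_width)
expected_unbatched = (1, 10, 3, 18, 18)   # single video
expected_batched = (7, 10, 3, 18, 18)     # batch_size=7 from the tester defaults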
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path , config_path , output_path ):
    config = OmegaConf.load(config_path )
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    keys = list(state_dict.keys() )
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = 'first_stage_model.'
    for key in keys:
        if key.startswith(first_stage_key ):
            first_stage_dict[key.replace(first_stage_key , '' )] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = 'model.diffusion_model.'
    for key in keys:
        if key.startswith(unet_key ):
            unet_state_dict[key.replace(unet_key , '' )] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args ).eval()
    vqvae.load_state_dict(first_stage_dict )
    unet = UNetLDMModel(**unet_init_args ).eval()
    unet.load_state_dict(unet_state_dict )
    scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , scheduler )
    pipeline.save_pretrained(output_path )
if __name__ == "__main__":
_lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
_lowerCAmelCase :List[Any] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 263 | 0 |
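The VQVAE/UNet extraction above is an instance of a common prefix-splitting pattern over a combined state dict. A self-contained sketch (the helper name is ours, not from the script):

def extract_sub_state_dict(state_dict, prefix):
    # keep entries under `prefix`, stripping the prefix from their keys
    return {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}
full = {"first_stage_model.conv.weight": 1, "model.diffusion_model.out.bias": 2}
assert extract_sub_state_dict(full, "first_stage_model.") == {"conv.weight": 1}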
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = ["""model.decoder.embed_positions.weights"""]
def rename_keys(name ) -> str:
    if "emb" in name:
        name = name.replace('emb' , 'model.decoder.embed_tokens' )
    if "transformer" in name:
        name = name.replace('transformer' , 'model.decoder' )
    if "cross_attention" in name:
        name = name.replace('cross_attention' , 'encoder_attn' )
    if "linear1" in name:
        name = name.replace('linear1' , 'fc1' )
    if "linear2" in name:
        name = name.replace('linear2' , 'fc2' )
    if "norm1" in name:
        name = name.replace('norm1' , 'self_attn_layer_norm' )
    if "norm_cross" in name:
        name = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
    if "norm2" in name:
        name = name.replace('norm2' , 'final_layer_norm' )
    if "out_norm" in name:
        name = name.replace('out_norm' , 'model.decoder.layer_norm' )
    if "linears" in name:
        name = name.replace('linears' , 'lm_heads' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
    return name
def rename_state_dict(state_dict , hidden_size ) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('in_proj_weight' , 'q_proj.weight' )] = val[:hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'k_proj.weight' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'v_proj.weight' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('enc_to_dec_proj.' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint ) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained('t5-base' )
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz' )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" )
    if len(unexpected_keys ) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('Incorrect shape for logits' )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base' )
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
lowercase_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 269 |
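The `in_proj_weight` handling in `rename_state_dict` relies on the fused projection stacking query, key and value row-wise. A minimal standalone sketch of that split:

import torch
def split_fused_qkv(in_proj_weight: torch.Tensor, hidden_size: int):
    # a fused (3 * hidden_size, hidden_size) projection stores q, k, v stacked along dim 0
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : 2 * hidden_size, :]
    v = in_proj_weight[-hidden_size:, :]
    return q, k, v
fused = torch.randn(3 * 4, 4)
q, k, v = split_fused_qkv(fused, 4)
assert q.shape == k.shape == v.shape == (4, 4)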
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class LanguageModeling (TaskTemplate ):
    task: str = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({} )
    text_column: str = "text"
    @property
    def column_mapping( self ) -> Dict[str, str]:
        """simple docstring"""
        return {self.text_column: "text"}
| 269 | 1 |
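A short usage sketch of the task template defined above; `column_mapping` maps the configured text column onto the canonical 'text' feature (assuming the class mirrors datasets' stock LanguageModeling template):

template = LanguageModeling(text_column="content")
assert template.column_mapping == {"content": "text"}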
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__snake_case =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict ):
    prefix = ''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
        in_proj_bias = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
        in_proj_bias_cross_attn = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.weight'] = in_proj_weight_cross_attn[:256, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.q_proj.bias'] = in_proj_bias_cross_attn[:256]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.weight'] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.k_proj.bias'] = in_proj_bias_cross_attn[256:512]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.weight'] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f'decoder.layers.{i}.encoder_attn.v_proj.bias'] = in_proj_bias_cross_attn[-256:]
def resize( image , checkpoint_url ):
    width, height = image.size
    current_max_size = max(width , height )
    target_max_size = 800 if 'detection' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize( image ):
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ):
    logger.info('Converting model...' )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: 'table', 1: 'table rotated'}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1000 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=filename )
    image = Image.open(file_path ).convert('RGB' )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
        expected_boxes = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
        expected_boxes = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...' )
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__snake_case =parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 4 |
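The conversion applies the (src, dest) pairs collected above one at a time through `rename_key`. A tiny isolated run of that loop:

sd = {"input_proj.weight": 0, "input_proj.bias": 1}
for src, dest in [("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias")]:
    rename_key(sd, src, dest)
assert sorted(sd) == ["input_projection.bias", "input_projection.weight"]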
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
def rename_keys(state_dict):
    """Map the original checkpoint's key names onto the GLPNForDepthEstimation naming scheme."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
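# Worked example (illustrative, not executed): a raw checkpoint key such as
#   "module.encoder.patch_embed1.proj.weight"
# is first rewritten to "glpn.encoder.patch_embed1.proj.weight" by the prefix rule,
# then to "glpn.encoder.patch_embeddings.0.proj.weight" by the 1-based -> 0-based
# index rule, which matches the attribute path GLPNForDepthEstimation expects.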
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
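# The fused "kv" projection in the original checkpoint has shape
# (2 * hidden_size, hidden_size): the slicing above takes the first
# hidden_sizes[i] rows as the key matrix and the remaining rows as the value
# matrix. For example, with hidden_sizes[i] == 64 the fused weight is (128, 64)
# and each half comes out as a (64, 64) projection.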
def prepare_img():
    # verify the conversion on a standard COCO validation image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
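# A hypothetical invocation of this script (script name and checkpoint path are
# placeholders; --model_name selects which expected output slice is checked):
#
#   python convert_glpn_to_pytorch.py \
#       --checkpoint_path ./glpn_kitti.pth \
#       --model_name glpn-kitti \
#       --push_to_hub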
| 4 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        # write a tiny vocab + merges file so the tokenizer can be loaded from disk
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
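# These tests are self-contained (they build a tiny vocab, merges file, and image
# processor config in a temp dir), so they can be run directly with pytest, e.g.:
#   python -m pytest test_processor_clip.py -v
# (the file path is a placeholder; use wherever this module lives in the test tree)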
| 85 | """Tests for the FileLock utility shipped with 🤗 Datasets."""

import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    # two locks on the same file: the second acquire must time out while the first is held
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filename(tmpdir):
    # lock file names beyond the filesystem limit are shortened to at most 255 characters
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
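# Minimal usage sketch of the FileLock API exercised above (assumes a writable
# working directory; the lock file name is illustrative):
#
#   lock = FileLock("my_resource.lock")
#   with lock.acquire(timeout=5):
#       ...  # exclusive access to the guarded resource while the lock is held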
| 85 | 1 |