code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> float:
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__UpperCAmelCase , __UpperCAmelCase ) ) )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
lowerCAmelCase__ : Optional[Any] = (
"""Wrong input data's dimensions... """
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__UpperCAmelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
lowerCAmelCase__ : Optional[int] = (
"""Wrong input data's shape... """
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__UpperCAmelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("""Wrong shape""" )
if dataset.dtype != value_array.dtype:
lowerCAmelCase__ : Union[str, Any] = (
"""Input data have different datatype... """
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = []
for value in value_array:
lowerCAmelCase__ : Optional[Any] = euclidean(__UpperCAmelCase , dataset[0] )
lowerCAmelCase__ : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
lowerCAmelCase__ : int = euclidean(__UpperCAmelCase , __UpperCAmelCase )
if dist > temp_dist:
lowerCAmelCase__ : List[Any] = temp_dist
lowerCAmelCase__ : Optional[Any] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> float:
return np.dot(__UpperCAmelCase , __UpperCAmelCase ) / (norm(__UpperCAmelCase ) * norm(__UpperCAmelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242 |
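The row above (after undoing the auto-generated names) is a nearest-neighbour search with a Euclidean distance helper and a cosine-similarity function. A minimal readable sketch of the same logic; the variable names are reconstructions, not taken from the row:

```python
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(a, b) -> float:
    # Straight-line distance between two same-length vectors.
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    # For every query vector, keep the dataset row with the smallest distance.
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for row in dataset[1:]:
            temp_dist = euclidean(value, row)
            if dist > temp_dist:
                dist = temp_dist
                vector = row.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    # Dot product normalised by the two vector magnitudes.
    return float(np.dot(a, b) / (norm(a) * norm(b)))
```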
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _lowerCamelCase ( a_ ):
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(UpperCamelCase )
def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self._create_example_records()
lowerCAmelCase__ : Tuple = Dataset.from_list(UpperCamelCase )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(UpperCamelCase ):
self.assertDictEqual(UpperCamelCase , example_records[i] )
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self._create_example_records()
lowerCAmelCase__ : Optional[Any] = Dataset.from_list(UpperCamelCase )
lowerCAmelCase__ : int = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def _lowerCAmelCase ( self : Tuple ) -> List[Any]: # checks what happens with missing columns
"""simple docstring"""
lowerCAmelCase__ : str = [{"""col_1""": 1}, {"""col_2""": """x"""}]
lowerCAmelCase__ : int = Dataset.from_list(UpperCamelCase )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def _lowerCAmelCase ( self : str ) -> Dict: # checks if the type can be inferred from the second record
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
lowerCAmelCase__ : Optional[int] = Dataset.from_list(UpperCamelCase )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = Dataset.from_list([] )
self.assertEqual(len(UpperCamelCase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 242 | 1 |
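The test row above pins down `Dataset.from_list` behaviour: column order follows the records, the first record fixes the columns, and the element type of an empty list is inferred from a later record. A hedged usage sketch (assumes a `datasets` release that ships `Dataset.from_list`):

```python
from datasets import Dataset, Sequence, Value

# Schema is inferred from the records; column order follows the first one.
dset = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
assert dset.column_names == ["col_1", "col_2"]

# The first record fixes the columns; keys missing later become None.
sparse = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
assert sparse[1] == {"col_1": None}

# An empty list's element type is inferred from the second record.
typed = Dataset.from_list([{"col_1": []}, {"col_1": [1, 2]}])
assert typed.features["col_1"] == Sequence(Value("int64"))
```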
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase (*a__ :Dict , a__ :Optional[Union[Dict, Any]] = None , a__ :List[str]=True , a__ :int=2 ):
"""simple docstring"""
from .. import __version__
UpperCamelCase__ = take_from
UpperCamelCase__ = ()
if not isinstance(args[0] , lowerCAmelCase__ ):
UpperCamelCase__ = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowerCAmelCase__ ).base_version ) >= version.parse(lowerCAmelCase__ ):
raise ValueError(
f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\'"""
f""" version {__version__} is >= {version_name}""" )
UpperCamelCase__ = None
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowerCAmelCase__ ),)
UpperCamelCase__ = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
values += (getattr(lowerCAmelCase__ , lowerCAmelCase__ ),)
UpperCamelCase__ = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
UpperCamelCase__ = f"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
UpperCamelCase__ = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , lowerCAmelCase__ , stacklevel=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) > 0:
UpperCamelCase__ = inspect.getouterframes(inspect.currentframe() )[1]
UpperCamelCase__ = call_frame.filename
UpperCamelCase__ = call_frame.lineno
UpperCamelCase__ = call_frame.function
UpperCamelCase__ , UpperCamelCase__ = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(lowerCAmelCase__ ) == 0:
return
elif len(lowerCAmelCase__ ) == 1:
return values[0]
return values
| 360 |
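The row above is a `deprecate(...)`-style helper in the spirit of diffusers: it pops deprecated kwargs, emits a `FutureWarning`, and raises once the package version reaches the removal version. A simplified, hedged reimplementation of the core idea (the version constant and names are illustrative):

```python
import warnings

from packaging import version

LIBRARY_VERSION = "0.20.0"  # stand-in for the real package __version__


def deprecate(name: str, removed_in: str, message: str, kwargs: dict):
    # Fail loudly if the deprecation window has already closed.
    if version.parse(LIBRARY_VERSION) >= version.parse(removed_in):
        raise ValueError(
            f"The deprecation for `{name}` should be removed since "
            f"version {LIBRARY_VERSION} is >= {removed_in}"
        )
    if name not in kwargs:
        return None
    value = kwargs.pop(name)
    warnings.warn(
        f"`{name}` is deprecated and will be removed in {removed_in}. {message}",
        FutureWarning,
        stacklevel=2,
    )
    return value
```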
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=64 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
UpperCamelCase__ = vocab_size - 1
def _lowerCamelCase ( self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ = True
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = GPTNeoXModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
UpperCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = True
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = GPTNeoXForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForQuestionAnswering(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForTokenClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = True
UpperCamelCase__ = GPTNeoXForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# first forward pass
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
UpperCamelCase__ = outputs.past_key_values
# create hypothetical next tokens and extend next_input_ids with them
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the new mask to input_mask
UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
UpperCamelCase__ = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _a , _a , _a , unittest.TestCase ):
snake_case : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case : Union[str, Any] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
snake_case : Dict = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case : Tuple = False
snake_case : Dict = False
snake_case : Tuple = False
snake_case : Any = False
def _lowerCamelCase ( self ):
UpperCamelCase__ = GPTNeoXModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=64 , num_attention_heads=8 )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ = None
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _lowerCamelCase ( self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = ids_tensor([1, 10] , config.vocab_size )
UpperCamelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
original_model.to(__lowerCAmelCase )
original_model.eval()
UpperCamelCase__ = original_model(__lowerCAmelCase ).last_hidden_state
UpperCamelCase__ = original_model(__lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = {"""type""": scaling_type, """factor""": 10.0}
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
scaled_model.to(__lowerCAmelCase )
scaled_model.eval()
UpperCamelCase__ = scaled_model(__lowerCAmelCase ).last_hidden_state
UpperCamelCase__ = scaled_model(__lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCamelCase__ = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__lowerCAmelCase )
UpperCamelCase__ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__lowerCAmelCase )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCamelCase__ = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCamelCase__ = model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=20 )
UpperCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
| 87 | 0 |
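The heaviest check in the GPT-NeoX tester above verifies that decoding with `past_key_values` reproduces a full forward pass over the concatenated sequence. A hedged sketch of that pattern for a Hugging Face decoder model (the tolerance and hidden-state slicing are illustrative):

```python
import torch


def check_kv_cache_consistency(model, input_ids, next_tokens, atol=1e-3):
    # Reference: one forward pass over the whole sequence, no cache.
    full_ids = torch.cat([input_ids, next_tokens], dim=-1)
    reference = model(full_ids, output_hidden_states=True).hidden_states[-1]

    # Candidate: the same computation split in two, reusing the cache.
    first = model(input_ids, use_cache=True)
    continued = model(
        next_tokens,
        past_key_values=first.past_key_values,
        output_hidden_states=True,
    ).hidden_states[-1]

    # The cached continuation must match the tail of the reference pass.
    n = next_tokens.shape[1]
    assert torch.allclose(reference[:, -n:], continued, atol=atol)
```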
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
SCREAMING_SNAKE_CASE : List[Any] = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
SCREAMING_SNAKE_CASE : int = logging.WARNING
def lowercase ( ) ->Tuple:
"""simple docstring"""
__snake_case : Dict = os.getenv('''DATASETS_VERBOSITY''' , _snake_case )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def lowercase ( ) ->str:
"""simple docstring"""
return __name__.split('''.''' )[0]
def lowercase ( ) ->logging.Logger:
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def lowercase ( ) ->None:
"""simple docstring"""
__snake_case : Any = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase ( ) ->None:
"""simple docstring"""
__snake_case : Optional[int] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase ( _snake_case : Optional[str] = None ) ->logging.Logger:
"""simple docstring"""
if name is None:
__snake_case : Optional[int] = _get_library_name()
return logging.getLogger(_snake_case )
def lowercase ( ) ->int:
"""simple docstring"""
return _get_library_root_logger().getEffectiveLevel()
def lowercase ( _snake_case : int ) ->None:
"""simple docstring"""
_get_library_root_logger().setLevel(_snake_case )
def lowercase ( ) ->str:
"""simple docstring"""
return set_verbosity(_snake_case )
def lowercase ( ) ->str:
"""simple docstring"""
return set_verbosity(_snake_case )
def lowercase ( ) ->Any:
"""simple docstring"""
return set_verbosity(_snake_case )
def lowercase ( ) ->Tuple:
"""simple docstring"""
return set_verbosity(_snake_case )
def lowercase ( ) ->None:
"""simple docstring"""
__snake_case : List[str] = False
def lowercase ( ) ->None:
"""simple docstring"""
__snake_case : Any = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , *a_ , **a_ ): # pylint: disable=unused-argument
'''simple docstring'''
__snake_case : Dict = args[0] if args else None
def __iter__(self ):
'''simple docstring'''
return iter(self._iterator )
def __getattr__(self , a_ ):
'''simple docstring'''
def empty_fn(*a_ , **a_ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self ):
'''simple docstring'''
return self
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
return
SCREAMING_SNAKE_CASE : Optional[Any] = True
class _UpperCAmelCase :
'''simple docstring'''
def __call__(self , *a_ , a_=False , **a_ ):
'''simple docstring'''
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*a_ , **a_ )
else:
return EmptyTqdm(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
__snake_case : int = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
SCREAMING_SNAKE_CASE : Union[str, Any] = _tqdm_cls()
def lowercase ( ) ->bool:
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def lowercase ( ) ->Optional[Any]:
"""simple docstring"""
global _tqdm_active
__snake_case : Optional[Any] = True
def lowercase ( ) ->Tuple:
"""simple docstring"""
global _tqdm_active
__snake_case : Optional[int] = False
| 102 |
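The row above is the familiar Hugging Face logging shim: named verbosity levels, a `DATASETS_VERBOSITY` environment override, and a tqdm wrapper that can be switched off globally. A hedged, minimal version of the verbosity half:

```python
import logging
import os

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING


def _get_default_logging_level() -> int:
    # An environment variable may override the library default.
    env_level = os.getenv("DATASETS_VERBOSITY")
    if env_level:
        if env_level in log_levels:
            return log_levels[env_level]
        logging.getLogger().warning(
            "Unknown option DATASETS_VERBOSITY=%s, has to be one of: %s",
            env_level,
            ", ".join(log_levels),
        )
    return _default_log_level


def get_logger(name: str | None = None) -> logging.Logger:
    # Default to the library's root logger.
    return logging.getLogger(name or __name__.split(".")[0])
```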
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=a_ , text_encoder=a_ , tokenizer=a_ , unet=a_ , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , )
def SCREAMING_SNAKE_CASE (self , a_ = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case : Optional[int] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.enable_attention_slicing(a_ )
@torch.no_grad()
def __call__(self , a_ , a_ = 5_12 , a_ = 5_12 , a_ = 50 , a_ = 7.5 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , a_ = None , **a_ , ):
'''simple docstring'''
if isinstance(a_ , a_ ):
__snake_case : Any = 1
elif isinstance(a_ , a_ ):
__snake_case : Any = len(a_ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a_ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a_ , a_ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(a_ )}.""" )
# get prompt text embeddings
__snake_case : int = self.tokenizer(
a_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
__snake_case : int = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__snake_case : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case : Union[str, Any] = text_embeddings.shape
__snake_case : Optional[int] = text_embeddings.repeat(1 , a_ , 1 )
__snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , a_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case : List[str]
if negative_prompt is None:
__snake_case : List[Any] = ['''''']
elif type(a_ ) is not type(a_ ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a_ )} !="""
f""" {type(a_ )}.""" )
elif isinstance(a_ , a_ ):
__snake_case : List[str] = [negative_prompt]
elif batch_size != len(a_ ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a_ )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
__snake_case : Optional[int] = negative_prompt
__snake_case : Optional[int] = text_input_ids.shape[-1]
__snake_case : List[Any] = self.tokenizer(
a_ , padding='''max_length''' , max_length=a_ , truncation=a_ , return_tensors='''pt''' , )
__snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case : str = uncond_embeddings.shape[1]
__snake_case : int = uncond_embeddings.repeat(a_ , a_ , 1 )
__snake_case : int = uncond_embeddings.view(batch_size * num_images_per_prompt , a_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case : Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case : Dict = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case : Union[str, Any] = torch.randn(
a_ , generator=a_ , device='''cpu''' , dtype=a_ ).to(self.device )
__snake_case : Tuple = torch.randn(a_ , generator=a_ , device='''cpu''' , dtype=a_ ).to(
self.device )
else:
__snake_case : Dict = torch.randn(
a_ , generator=a_ , device=self.device , dtype=a_ )
__snake_case : Dict = torch.randn(a_ , generator=a_ , device=self.device , dtype=a_ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__snake_case : Union[str, Any] = latents_reference.to(self.device )
__snake_case : Dict = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case : int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case : Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case : int = 0 if dx < 0 else dx
__snake_case : Union[str, Any] = 0 if dy < 0 else dy
__snake_case : str = max(-dx , 0 )
__snake_case : Tuple = max(-dy , 0 )
__snake_case : Any = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(a_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case : Optional[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case : Tuple = {}
if accepts_eta:
__snake_case : List[str] = eta
for i, t in enumerate(self.progress_bar(a_ ) ):
# expand the latents if we are doing classifier free guidance
__snake_case : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case : Tuple = self.scheduler.scale_model_input(a_ , a_ )
# predict the noise residual
__snake_case : int = self.unet(a_ , a_ , encoder_hidden_states=a_ ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case : Tuple = noise_pred.chunk(2 )
__snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case : Optional[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a_ , a_ , a_ )
__snake_case : Union[str, Any] = 1 / 0.1_8215 * latents
__snake_case : Optional[Any] = self.vae.decode(a_ ).sample
__snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case : Optional[int] = self.feature_extractor(self.numpy_to_pil(a_ ) , return_tensors='''pt''' ).to(
self.device )
__snake_case , __snake_case : List[Any] = self.safety_checker(
images=a_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case : Union[str, Any] = None
if output_type == "pil":
__snake_case : Union[str, Any] = self.numpy_to_pil(a_ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=a_ , nsfw_content_detected=a_ )
| 102 | 1 |
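The denoising loop in the pipeline above hinges on classifier-free guidance: the UNet runs on a doubled batch (unconditional + conditional) and the two predictions are extrapolated. A hedged sketch of just that step (tensor shapes are assumptions, not taken from the row):

```python
import torch


def apply_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks [unconditional, conditional] predictions along dim 0.
    noise_uncond, noise_text = noise_pred.chunk(2)
    # guidance_scale == 1.0 falls back to the plain conditional prediction.
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)
```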
"""simple docstring"""
def _A ( UpperCamelCase_ : int) -> bool:
return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a = int(input('Enter number: ').strip())
print(F"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 357 |
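Readable form of the one-liner above: a perfect number equals the sum of its proper divisors, all of which lie in `1..n // 2`. A sketch with reconstructed names:

```python
def is_perfect(number: int) -> bool:
    # 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 are the first two hits.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


assert is_perfect(6) and is_perfect(28) and not is_perfect(27)
```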
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
_a = pytest.mark.integration
_a = {'comet'}
_a = importlib.util.find_spec('fairseq') is not None
_a = {'code_eval'}
_a = os.name == 'nt'
_a = {'bertscore', 'frugalscore', 'perplexity'}
_a = importlib.util.find_spec('transformers') is not None
def _A ( UpperCamelCase_ : Dict) -> Any:
'''simple docstring'''
@wraps(UpperCamelCase_)
def wrapper(self : Dict, UpperCamelCase_ : Dict):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"")
else:
test_case(self, UpperCamelCase_)
return wrapper
def _A ( UpperCamelCase_ : Dict) -> int:
'''simple docstring'''
@wraps(UpperCamelCase_)
def wrapper(self : int, UpperCamelCase_ : str):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"")
else:
test_case(self, UpperCamelCase_)
return wrapper
def _A ( UpperCamelCase_ : Tuple) -> str:
'''simple docstring'''
@wraps(UpperCamelCase_)
def wrapper(self : Optional[int], UpperCamelCase_ : Optional[Any]):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"")
else:
test_case(self, UpperCamelCase_)
return wrapper
def _A ( ) -> str:
'''simple docstring'''
__lowercase = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
lowercase ,lowercase ,lowercase )
@local
class _lowerCAmelCase ( parameterized.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = {}
__UpperCAmelCase : Tuple = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def _lowercase ( self : Dict, UpperCAmelCase__ : int ):
__lowercase = "[...]"
__lowercase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics", UpperCAmelCase__ ) ).module_path )
__lowercase = datasets.load.import_main_class(metric_module.__name__, dataset=UpperCAmelCase__ )
# check parameters
__lowercase = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(UpperCAmelCase__, metric_module.__name__ ):
with self.use_local_metrics():
try:
__lowercase = doctest.testmod(UpperCAmelCase__, verbose=UpperCAmelCase__, raise_on_error=UpperCAmelCase__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed, 0 )
self.assertGreater(results.attempted, 1 )
@slow
def _lowercase ( self : List[Any], UpperCAmelCase__ : Optional[Any] ):
__lowercase = "[...]"
__lowercase = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics", UpperCAmelCase__ ) ).module_path )
# run doctest
with self.use_local_metrics():
__lowercase = doctest.testmod(UpperCAmelCase__, verbose=UpperCAmelCase__, raise_on_error=UpperCAmelCase__ )
self.assertEqual(results.failed, 0 )
self.assertGreater(results.attempted, 1 )
@contextmanager
def _lowercase ( self : List[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Tuple ):
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCAmelCase__ ):
yield
else:
yield
@contextmanager
def _lowercase ( self : List[Any] ):
def load_local_metric(UpperCAmelCase__ : Any, *UpperCAmelCase__ : List[Any], **UpperCAmelCase__ : Any ):
return load_metric(os.path.join("metrics", UpperCAmelCase__ ), *UpperCAmelCase__, **UpperCAmelCase__ )
with patch("datasets.load_metric" ) as mock_load_metric:
__lowercase = load_local_metric
yield
@classmethod
def _lowercase ( cls : Optional[Any], UpperCAmelCase__ : List[Any] ):
def wrapper(UpperCAmelCase__ : Tuple ):
__lowercase = contextmanager(UpperCAmelCase__ )
__lowercase = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def _A ( UpperCamelCase_ : Any) -> Optional[Any]:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv", "", "") # handle pytest cli flags
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def _lowercase ( self : Tuple, UpperCAmelCase__ : Tuple ):
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor") as mock_create_predictor:
__lowercase = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def _A ( UpperCamelCase_ : Tuple) -> int:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase_ : Tuple, UpperCamelCase_ : str, *UpperCamelCase_ : Optional[Any], **UpperCamelCase_ : Dict):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_))
# mock get_model, which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model"), patch(
"bert_score.scorer.bert_cos_score_idf") as mock_bert_cos_score_idf:
__lowercase = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def _A ( UpperCamelCase_ : Tuple) -> List[Any]:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase_ : Tuple):
class _lowerCAmelCase :
"""simple docstring"""
def _lowercase ( self : str, UpperCAmelCase__ : int, *UpperCAmelCase__ : Dict, **UpperCAmelCase__ : Dict ):
assert len(UpperCAmelCase__ ) == 2
__lowercase = [0.19, 0.92]
return scores, sum(UpperCAmelCase__ ) / len(UpperCAmelCase__ )
return Model()
# mock download_model and load_from_checkpoint, which are supposed to download a comet model
with patch("comet.download_model") as mock_download_model:
__lowercase = None
with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
__lowercase = load_from_checkpoint
yield
def _A ( ) -> Tuple:
'''simple docstring'''
__lowercase = load_metric(os.path.join("metrics", "seqeval"))
__lowercase = "ERROR"
__lowercase = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase_, match=re.escape(UpperCamelCase_)):
metric.compute(predictions=[], references=[], scheme=UpperCamelCase_)
| 144 | 0 |
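The harness above registers one context manager per metric and patches the expensive model calls before running the metric's doctests. A hedged, self-contained sketch of that registration pattern (the patch body is a stub; the real patchers mock library calls such as the BERTScore forward pass):

```python
from contextlib import contextmanager

INTENSIVE_CALLS_PATCHER = {}


def register_intensive_calls_patcher(metric_name):
    # Decorator: store one context-manager factory per metric name.
    def wrapper(func):
        INTENSIVE_CALLS_PATCHER[metric_name] = contextmanager(func)
        return func
    return wrapper


@register_intensive_calls_patcher("bertscore")
def patch_bertscore(metric_name):
    # A real patcher would mock the model call here, e.g. with unittest.mock.patch.
    print(f"patching heavy calls for {metric_name}")
    yield
    print(f"unpatching {metric_name}")


# The test looks the patcher up by name and wraps the doctest run with it.
with INTENSIVE_CALLS_PATCHER["bertscore"]("bertscore"):
    pass  # doctest.testmod(metric_module, ...) would run here
```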
'''simple docstring'''
def __UpperCAmelCase ( a_: int = 1_000 ):
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = 1, 1
_UpperCAmelCase : Tuple = []
for i in range(1, n + 1 ):
_UpperCAmelCase : Tuple = prev_numerator + 2 * prev_denominator
_UpperCAmelCase : Optional[Any] = prev_numerator + prev_denominator
if len(str(a_ ) ) > len(str(a_ ) ):
result.append(a_ )
_UpperCAmelCase : List[str] = numerator
_UpperCAmelCase : Union[str, Any] = denominator
return len(a_ )
if __name__ == "__main__":
print(f'{solution() = }')
| 145 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = '''ibert'''
def __init__( self : int , lowerCAmelCase__ : Any=3_0_5_2_2 , lowerCAmelCase__ : List[Any]=7_6_8 , lowerCAmelCase__ : int=1_2 , lowerCAmelCase__ : List[Any]=1_2 , lowerCAmelCase__ : Optional[int]=3_0_7_2 , lowerCAmelCase__ : List[str]="gelu" , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Optional[Any]=5_1_2 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : Any=0.02 , lowerCAmelCase__ : Union[str, Any]=1e-12 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Optional[Any]=0 , lowerCAmelCase__ : Tuple=2 , lowerCAmelCase__ : Optional[Any]="absolute" , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Tuple="none" , **lowerCAmelCase__ : Optional[Any] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : str = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Union[str, Any] = type_vocab_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Tuple = layer_norm_eps
_UpperCAmelCase : Optional[int] = position_embedding_type
_UpperCAmelCase : Any = quant_mode
_UpperCAmelCase : Optional[Any] = force_dequant
class A__ ( UpperCamelCase ):
"""simple docstring"""
@property
def _lowerCAmelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
_UpperCAmelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 145 | 1 |
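The `OnnxConfig` subclass above only declares dynamic axes; multiple-choice inputs carry an extra `choice` dimension. A hedged sketch of what the property yields, written standalone outside the transformers class hierarchy:

```python
from collections import OrderedDict


def dynamic_inputs(task: str) -> OrderedDict:
    # Multiple-choice models see (batch, choice, sequence); others (batch, sequence).
    if task == "multiple-choice":
        axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis)])


assert dynamic_inputs("default")["input_ids"] == {0: "batch", 1: "sequence"}
```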
import operator
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase = False , _lowerCAmelCase = None ) -> list:
"""simple docstring"""
A : List[str] = operator.lt if reverse else operator.gt
A : List[str] = solution or []
if not arr:
return solution
A : Dict = [arr.pop(0 )]
for i, item in enumerate(_lowerCAmelCase ):
if _operator(_lowerCAmelCase , sublist[-1] ):
sublist.append(_lowerCAmelCase )
arr.pop(_lowerCAmelCase )
# merging sublist into solution list
if not solution:
solution.extend(_lowerCAmelCase )
else:
while sublist:
A : Optional[Any] = sublist.pop(0 )
for i, xx in enumerate(_lowerCAmelCase ):
if not _operator(_lowerCAmelCase , _lowerCAmelCase ):
solution.insert(_lowerCAmelCase , _lowerCAmelCase )
break
else:
solution.append(_lowerCAmelCase )
strand_sort(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 360 |
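The row above is strand sort: repeatedly peel an ordered "strand" off the input and merge it into the partial solution. A cleaned-up sketch (iterating over a copy avoids the mutate-while-enumerate quirk in the row's loop):

```python
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    # `gt` builds ascending output; `lt` flips it for reverse=True.
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    # Peel one strand: items that continue the current run, in input order.
    sublist = [arr.pop(0)]
    for item in arr[:]:
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.remove(item)
    # Merge the strand into the partial solution.
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, existing in enumerate(solution):
                if not _operator(item, existing):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    return strand_sort(arr, reverse, solution)


assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
```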
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=14, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=4, lowerCamelCase__=4, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=0.02, ):
A : List[str] = parent
A : Any = batch_size
A : Dict = seq_length
A : Tuple = is_training
A : Any = use_input_mask
A : Any = use_token_type_ids
A : Any = use_labels
A : Optional[int] = vocab_size
A : Dict = hidden_size
A : Dict = rotary_dim
A : Dict = num_hidden_layers
A : Tuple = num_attention_heads
A : Tuple = intermediate_size
A : Union[str, Any] = hidden_act
A : Dict = hidden_dropout_prob
A : List[str] = attention_probs_dropout_prob
A : Optional[int] = max_position_embeddings
A : str = initializer_range
A : Any = None
A : Any = vocab_size - 1
A : int = vocab_size - 1
A : int = vocab_size - 1
def _lowerCAmelCase ( self ):
A : str = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : Optional[int] = None
if self.use_input_mask:
A : Any = random_attention_mask([self.batch_size, self.seq_length] )
A : int = GPTJConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=lowerCamelCase__, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
return (config, input_ids, input_mask)
def _lowerCAmelCase ( self ):
A : List[str] = self.prepare_config_and_inputs()
A , A , A : List[str] = config_and_inputs
A : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Optional[int] = 20
A : Tuple = model_class_name(lowerCamelCase__ )
A : Dict = model.init_cache(input_ids.shape[0], lowerCamelCase__ )
A : int = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="""i4""" )
A : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
A : List[Any] = model(
input_ids[:, :-1], attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, position_ids=lowerCamelCase__, )
A : List[Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="""i4""" )
A : Any = model(
input_ids[:, -1:], attention_mask=lowerCamelCase__, past_key_values=outputs_cache.past_key_values, position_ids=lowerCamelCase__, )
A : Any = model(lowerCamelCase__ )
A : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''' )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A : Any = 20
A : Any = model_class_name(lowerCamelCase__ )
A : Dict = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )], axis=-1, )
A : str = model.init_cache(input_ids.shape[0], lowerCamelCase__ )
A : Any = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
A : Optional[int] = model(
input_ids[:, :-1], attention_mask=lowerCamelCase__, past_key_values=lowerCamelCase__, position_ids=lowerCamelCase__, )
A : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="""i4""" )
A : List[Any] = model(
input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=lowerCamelCase__, position_ids=lowerCamelCase__, )
A : Union[str, Any] = model(lowerCamelCase__, attention_mask=lowerCamelCase__ )
A : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''' )
@require_flax
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__lowerCamelCase : Optional[int] = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def _lowerCAmelCase ( self ):
A : List[Any] = FlaxGPTJModelTester(self )
def _lowerCAmelCase ( self ):
for model_class_name in self.all_model_classes:
A , A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def _lowerCAmelCase ( self ):
for model_class_name in self.all_model_classes:
A , A , A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
@tooslow
def _lowerCAmelCase ( self ):
A : int = GPTaTokenizer.from_pretrained("""gpt2""", pad_token="""<|endoftext|>""", padding_side="""left""" )
A : Optional[int] = tokenizer(["""Hello this is a long string""", """Hey"""], return_tensors="""np""", padding=lowerCamelCase__, truncation=lowerCamelCase__ )
A : Dict = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
A : str = False
A : Optional[Any] = model.config.eos_token_id
A : Union[str, Any] = jax.jit(model.generate )
A : str = jit_generate(
inputs["""input_ids"""], attention_mask=inputs["""attention_mask"""], pad_token_id=tokenizer.pad_token_id ).sequences
A : Optional[Any] = tokenizer.batch_decode(lowerCamelCase__, skip_special_tokens=lowerCamelCase__ )
A : Tuple = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(lowerCamelCase__, lowerCamelCase__ )
@is_pt_flax_cross_test
def _lowerCAmelCase ( self ):
A , A : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A : Any = self._prepare_for_class(lowerCamelCase__, lowerCamelCase__ )
A : Dict = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
A : str = getattr(lowerCamelCase__, lowerCamelCase__ )
A , A : Optional[int] = pt_inputs["""input_ids"""].shape
A : List[str] = np.random.randint(0, seq_length - 1, size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
A : List[Any] = 0
A : Tuple = 1
A : Optional[int] = 0
A : str = 1
A : Dict = pt_model_class(lowerCamelCase__ ).eval()
A : int = model_class(lowerCamelCase__, dtype=jnp.floataa )
A : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCamelCase__ )
A : Dict = fx_state
with torch.no_grad():
A : Optional[int] = pt_model(**lowerCamelCase__ ).to_tuple()
A : str = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ), len(lowerCamelCase__ ), """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase__, lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCamelCase__ )
A : Union[str, Any] = model_class.from_pretrained(lowerCamelCase__, from_pt=lowerCamelCase__ )
A : Any = fx_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ), len(lowerCamelCase__ ), """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(lowerCamelCase__, lowerCamelCase__ ):
self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2 )
@is_pt_flax_cross_test
def _lowerCAmelCase ( self ):
A , A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
A : int = self._prepare_for_class(lowerCamelCase__, lowerCamelCase__ )
A : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
A : Dict = getattr(lowerCamelCase__, lowerCamelCase__ )
A : int = pt_model_class(lowerCamelCase__ ).eval()
A : int = model_class(lowerCamelCase__, dtype=jnp.floataa )
A : List[str] = load_flax_weights_in_pytorch_model(lowerCamelCase__, fx_model.params )
A , A : Optional[int] = pt_inputs["""input_ids"""].shape
A : Optional[int] = np.random.randint(0, seq_length - 1, size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
A : Tuple = 0
A : Tuple = 1
A : str = 0
A : int = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
A : List[str] = pt_model(**lowerCamelCase__ ).to_tuple()
A : Optional[int] = fx_model(**lowerCamelCase__ ).to_tuple()
self.assertEqual(len(lowerCamelCase__ ), len(lowerCamelCase__ ), """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase__, lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCamelCase__ )
A : str = pt_model_class.from_pretrained(lowerCamelCase__, from_flax=lowerCamelCase__ )
with torch.no_grad():
A : str = pt_model_loaded(**lowerCamelCase__ ).to_tuple()
self.assertEqual(
len(lowerCamelCase__ ), len(lowerCamelCase__ ), """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(lowerCamelCase__, lowerCamelCase__ ):
self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 )
@tooslow
def _lowerCAmelCase ( self ):
for model_class_name in self.all_model_classes:
A : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
A : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
| 115 | 0 |
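The cross-tests above convert weights in both directions (`convert_pytorch_state_dict_to_flax`, `load_flax_weights_in_pytorch_model`) and then compare the last-token outputs within `4e-2`. A hedged sketch of the comparison core, with the framework glue omitted:

```python
import numpy as np


def assert_frameworks_agree(fx_outputs, pt_outputs, tol=4e-2):
    # Both inputs are tuples of (batch, seq, vocab)-shaped arrays.
    assert len(fx_outputs) == len(pt_outputs), "Output lengths differ between Flax and PyTorch"
    for fx, pt in zip(fx_outputs, pt_outputs):
        diff = np.max(np.abs(np.asarray(fx)[:, -1] - np.asarray(pt)[:, -1]))
        assert diff < tol, f"max diff {diff} exceeds {tol}"
```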
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowerCamelCase :Dict = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
lowerCamelCase :Any = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
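
# Editor's illustration (an addition): Spearman's rho, computed by the metric
# class below via scipy, is simply Pearson's r on rank-transformed data. For
# tie-free inputs this minimal sketch gives the same value.
def _spearman_from_ranks(xs, ys):
    def rank(vals):
        order = sorted(range(len(vals)), key=vals.__getitem__)
        ranks = [0.0] * len(vals)
        for r, i in enumerate(order):
            ranks[i] = float(r + 1)
        return ranks

    rx, ry = rank(xs), rank(ys)
    n = len(xs)
    mx, my = sum(rx) / n, sum(ry) / n
    cov = sum((a - mx) * (b - my) for a, b in zip(rx, ry))
    sx = sum((a - mx) ** 2 for a in rx) ** 0.5
    sy = sum((b - my) ** 2 for b in ry) ** 0.5
    return cov / (sx * sy)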
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]} | 206 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCamelCase_ ( unittest.TestCase ):
    def setUp(self):
        '''simple docstring'''
        vocab = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        feature_extractor_map = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16_000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.feature_extraction_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(feature_extractor_map) + '''\n''')
        # load decoder from hub
        self.decoder_name = '''hf-internal-testing/ngram-beam-search-decoder'''
    def get_tokenizer(self, **kwargs_init):
        '''simple docstring'''
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_feature_extractor(self, **kwargs):
        '''simple docstring'''
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)
    def get_decoder(self, **kwargs):
        '''simple docstring'''
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)
    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set, decoder.model_container[decoder._model_key]._unigram_set, )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)
    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
        processor.save_pretrained(self.tmpdirname)
        # load with additional decoder kwargs and check that they reach the language model
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3)
        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)
    def test_load_decoder_tokenizer_mismatch_content(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(['''xx'''])
        with self.assertRaisesRegex(ValueError, '''include'''):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
    def test_feature_extractor(self):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        raw_speech = floats_list((3, 1_000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors='''np''')
        input_processor = processor(raw_speech, return_tensors='''np''')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        input_str = '''This is a test string'''
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        '''simple docstring'''
        np.random.seed(seed)
        return np.random.rand(*shape)
    def test_decoder(self):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits(shape=(10, 16), seed=13)
        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]
        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual('''</s> <s> </s>''', decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ['''fork'''], ['''spawn''']])
    def test_decoder_batch(self, pool_context):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)
        logits_list = list(logits)
        with get_context('''fork''').Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])
        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0
        decoded_processor_out = processor.batch_decode(
            logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits)
        with get_context('''fork''').Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, )
        texts_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]
        self.assertListEqual(texts_decoder, decoded_processor)
        self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''], texts_decoder)
        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))
        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))
    def test_decoder_with_params_of_lm(self):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True
        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, )
        decoded_processor = decoded_processor_out.text
        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary, )
        with get_context('''fork''').Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, )
        texts_decoder = [d[0][0] for d in decoded_decoder_out]
        self.assertListEqual(texts_decoder, decoded_processor)
        self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''], texts_decoder)
        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, lm_score_boundary)
    def test_decoder_download_ignores_files(self):
        '''simple docstring'''
        processor = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''')
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode('''utf-8''')).parent.parent.absolute()
        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ['''alphabet.json''', '''language_model''']
        downloaded_decoder_files.sort()
        expected_decoder_files.sort()
        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)
    def test_decoder_local_files(self):
        '''simple docstring'''
        local_dir = snapshot_download('''hf-internal-testing/processor_with_lm''')
        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)
        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode('''utf-8''')).parent.parent.absolute()
        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)
        local_decoder_files.sort()
        expected_decoder_files.sort()
        # test that both the decoder from the hub and the local files in the cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)
    def test_processor_from_auto_processor(self):
        '''simple docstring'''
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''')
        processor_auto = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''')
        raw_speech = floats_list((3, 1_000))
        input_wavaveca = processor_wavaveca(raw_speech, return_tensors='''np''')
        input_auto = processor_auto(raw_speech, return_tensors='''np''')
        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2)
        logits = self._get_dummy_logits()
        decoded_wavaveca = processor_wavaveca.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)
        self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)
    def test_model_input_names(self):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        self.assertListEqual(
            processor.model_input_names, feature_extractor.model_input_names, msg='''`processor` and `feature_extractor` model input names do not match''', )
@staticmethod
    def get_from_offsets(offsets, key):
        '''simple docstring'''
        retrieved_list = [d[key] for d in offsets]
return retrieved_list
    def test_offsets_integration_fast(self):
        '''simple docstring'''
        processor = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''')
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue('''text''' in outputs)
        self.assertTrue('''word_offsets''' in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))
        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''], '''word''')), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''word'''), ['''<s>''', '''<s>''', '''</s>'''])
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''start_offset'''), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''], '''end_offset'''), [1, 3, 5])
    def test_offsets_integration_fast_batch(self):
        '''simple docstring'''
        processor = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''')
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue('''text''' in outputs)
        self.assertTrue('''word_offsets''' in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))
        self.assertListEqual(
            [''' '''.join(self.get_from_offsets(o, '''word''')) for o in outputs['''word_offsets''']], outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''word'''), ['''<s>''', '''<s>''', '''</s>'''])
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''start_offset'''), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0], '''end_offset'''), [1, 3, 5])
@slow
@require_torch
@require_torchaudio
    def test_word_time_stamp_integration(self):
        '''simple docstring'''
        import torch

        ds = load_dataset('''common_voice''', '''en''', split='''train''', streaming=True)
        ds = ds.cast_column('''audio''', datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)
        processor = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''')
        model = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''')
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample['''audio''']['''array'''], return_tensors='''pt''').input_values
        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()
        output = processor.decode(logits[0], output_word_offsets=True)
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                '''start_time''': d['''start_offset'''] * time_offset,
                '''end_time''': d['''end_offset'''] * time_offset,
                '''word''': d['''word'''],
            }
            for d in output['''word_offsets''']
        ]
        EXPECTED_TEXT = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
        # output words
        self.assertEqual(''' '''.join(self.get_from_offsets(word_time_stamps, '''word''')), EXPECTED_TEXT)
        self.assertEqual(''' '''.join(self.get_from_offsets(word_time_stamps, '''word''')), output.text)
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, '''start_time'''))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, '''end_time'''))
        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on
        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
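
# Editor's usage sketch (an addition): converting CTC word offsets to seconds, as
# the slow test above does; same public checkpoint, kept commented to avoid a
# download at import time. `input_values` is a preprocessed audio batch (hypothetical).
#
# processor = WavaVecaProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
# model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
# logits = model(input_values).logits.cpu().numpy()
# output = processor.decode(logits[0], output_word_offsets=True)
# time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
# word_times = [
#     {"word": d["word"], "start": d["start_offset"] * time_offset, "end": d["end_offset"] * time_offset}
#     for d in output["word_offsets"]
# ]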
| 181 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''image_mean'''))
        self.assertTrue(hasattr(image_processing, '''image_std'''))
        self.assertTrue(hasattr(image_processing, '''do_normalize'''))
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 18})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ), ) | 228 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
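
# Editor's note (an addition): the model tester below derives attention masks
# from the pad token, following the standard seq2seq convention. Minimal sketch:
def _default_attention_mask(input_ids, pad_token_id):
    # 1 for real tokens, 0 for padding; same shape/device as input_ids
    return input_ids.ne(pad_token_id)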
class UMTaModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained('''google/umt5-base''')
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
if attention_mask is None:
lowerCAmelCase__ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase__ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase__ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase_ )
if decoder_head_mask is None:
lowerCAmelCase__ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase_ )
if cross_attn_head_mask is None:
lowerCAmelCase__ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMTaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ):
        model = UMTaModel(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)['''last_hidden_state''']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict, ):
        model = UMTaModel(config=config).to(torch_device).half().eval()
        output = model(**input_dict)['''last_hidden_state''']
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMTaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMTaForConditionalGeneration,
            "feature-extraction": UMTaModel,
            "summarization": UMTaForConditionalGeneration,
            "text2text-generation": UMTaForConditionalGeneration,
            "translation": UMTaForConditionalGeneration,
            "question-answering": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMTaModel(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), F"""{tmpdirname}/t5_test.onnx""", export_params=True, opset_version=9, input_names=['''input_ids''', '''decoder_input_ids'''], )
    @unittest.skipIf(torch_device == '''cpu''', '''Can't do half precision''')
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMTaForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            '''head_mask''': torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            '''decoder_head_mask''': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == '''head_mask''':
                head_masks['''decoder_head_mask'''] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]['''input_ids'''], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UMTaModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
    def test_small_integration_test(self):
        model = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('''google/umt5-small''', use_fast=False, legacy=False)
        input_text = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
        input_ids = tokenizer(input_text, return_tensors='''pt''', padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING) | 228 | 1 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the already-sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0) )
    input_list[low : high + 1] = result + left + right
return input_list
def iter_merge_sort(input_list: list) -> list:
    """Iterative (bottom-up) merge sort: merge runs of doubling width until sorted."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
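
# Editor's self-check (an addition): bottom-up merging doubles the sorted run
# width each pass (2, 4, 8, ...), so any input ends up fully sorted.
assert iter_merge_sort([5, 3, 8, 1, 2]) == [1, 2, 3, 5, 8]
assert iter_merge_sort([]) == []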
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
| 30 | def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
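# Editor's addition: the float cube root above can drift for large integers
# (e.g. 3 ** 45), so a rounded-root variant is a safer integer check.
def perfect_cube_rounded(n: int) -> bool:
    if n < 0:
        n = -n  # a cube's sign matches its root's; check the magnitude only
    root = round(n ** (1 / 3))
    return root**3 == n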
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 87 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Any = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
"""simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''') else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
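
def _check_qkv_split(hidden_size: int = 8) -> None:
    # Editor's sanity check (an addition): the slicing above partitions timm's
    # fused (3 * hidden, hidden) qkv projection into equal q/k/v blocks.
    import numpy as np  # local import; not otherwise required by this script

    qkv = np.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)
    q, k, v = qkv[:hidden_size], qkv[hidden_size : 2 * hidden_size], qkv[-hidden_size:]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)
    assert (np.concatenate([q, k, v]) == qkv).all()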
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
"""simple docstring"""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
UpperCAmelCase_ = 192
UpperCAmelCase_ = 768
UpperCAmelCase_ = 12
UpperCAmelCase_ = 3
elif vit_name[9:].startswith('''small''' ):
UpperCAmelCase_ = 384
UpperCAmelCase_ = 1536
UpperCAmelCase_ = 12
UpperCAmelCase_ = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
UpperCAmelCase_ = 768
UpperCAmelCase_ = 2304
UpperCAmelCase_ = 8
UpperCAmelCase_ = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
UpperCAmelCase_ = 1024
UpperCAmelCase_ = 4096
UpperCAmelCase_ = 24
UpperCAmelCase_ = 16
elif vit_name[4:].startswith('''huge''' ):
UpperCAmelCase_ = 1280
UpperCAmelCase_ = 5120
UpperCAmelCase_ = 32
UpperCAmelCase_ = 16
# load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
snake_case_ : int = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
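    # Editor's note (an addition): equivalently, the conversion can be driven
    # programmatically; the output path below is hypothetical.
    # convert_vit_checkpoint("vit_base_patch16_224", "./vit-base-patch16-224")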
| 7 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        vqa_pipeline = pipeline('''visual-question-answering''', model='''hf-internal-testing/tiny-vilt-random-vqa''')
        examples = [
            {
                '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
                '''question''': '''How many cats are there?''',
            },
            {
                '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                '''question''': '''How many cats are there?''',
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        """simple docstring"""
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs, [
                [{'''score''': ANY(float), '''answer''': ANY(str)}],
                [{'''score''': ANY(float), '''answer''': ANY(str)}],
            ], )
    @require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        vqa_pipeline = pipeline('''visual-question-answering''', model='''hf-internal-testing/tiny-vilt-random-vqa''')
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        question = '''How many cats are there?'''
        outputs = vqa_pipeline(image=image, question='''How many cats are there?''', top_k=2)
        self.assertEqual(
            outputs, [{'''score''': ANY(float), '''answer''': ANY(str)}, {'''score''': ANY(float), '''answer''': ANY(str)}])
        outputs = vqa_pipeline({'''image''': image, '''question''': question}, top_k=2)
        self.assertEqual(
            outputs, [{'''score''': ANY(float), '''answer''': ANY(str)}, {'''score''': ANY(float), '''answer''': ANY(str)}])
    @slow
    @require_torch
    def test_large_model_pt(self):
        """simple docstring"""
        vqa_pipeline = pipeline('''visual-question-answering''', model='''dandelin/vilt-b32-finetuned-vqa''')
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        question = '''How many cats are there?'''
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}])
        outputs = vqa_pipeline({'''image''': image, '''question''': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}])
        outputs = vqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{'''score''': 0.8799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2, )
    @require_tf
    @unittest.skip('''Visual question answering not implemented in TF''')
    def test_small_model_tf(self):
        """simple docstring"""
        pass
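
# Editor's usage sketch (an addition): the pipeline exercised above, standalone.
# Same public checkpoint as the slow test; commented out to avoid a download.
#
# vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
# preds = vqa(
#     image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#     question="How many cats are there?",
#     top_k=2,
# )
# # expected, per the test above: [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]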
| 7 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
    _import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_yolos'''] = [
        '''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''YolosForObjectDetection''',
        '''YolosModel''',
        '''YolosPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
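# Note: _LazyModule defers each submodule import until first attribute access, so importing
# this package stays cheap; e.g. looking up YolosForObjectDetection is what actually pulls in
# modeling_yolos (and therefore torch).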
| 70 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
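    # The tokenizer turns the (artist, genres, lyrics) metadata above into a list of three
    # input_ids tensors; the tests below pin their exact values for the 1b- and 5b-lyrics
    # checkpoints.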
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 144 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
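# Note on the last test: get_writer_batch_size appears to cap the Parquet writer batch (row
# group) size for media-heavy features such as images and audio, returning None when the
# default is fine; this intent is inferred from the parametrized cases above.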
| 319 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
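# Illustrative instantiation (the backbone name is an example, not something this file defines):
#
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))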
| 319 | 1 |
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")
    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])
    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
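# Dispatch sketch: get_config_parser() layers the "default" and "update" subcommands on top of
# the base config parser, and main() routes execution through args.func(args); the assumption
# here is that each subcommand parser registers its handler as "func" (e.g. via set_defaults).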
| 107 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 115 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image classification mapping
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
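# Resolution sketch (model name illustrative): FlaxAutoModel.from_pretrained("bert-base-cased")
# looks up BertConfig in FLAX_MODEL_MAPPING, which lazily resolves the "FlaxBertModel" entry
# registered above and instantiates that class.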
| 92 |
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Greatest common divisor via the recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
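# Worked example (both variants): gcd(48, 18) -> gcd(18, 12) -> gcd(12, 6) -> gcd(6, 0) = 6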
def main():
    """Print a few worked examples of both implementations."""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 92 | 1 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
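# Standalone inference sketch distilled from the integration test above (same checkpoint):
#
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler()).to(torch_device)
#   image = pipe(generator=torch.manual_seed(0), output_type="numpy").images[0]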
| 228 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # Overridden as a no-op: pretokenized inputs don't mix well with byte-level BPE.
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    def test_padding_different_model_input_name(self):
        # Overridden as a no-op: the tokenizer built in setUp has no padding token.
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0,
                    sequence_1,
                    add_special_tokens=True,
                    return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")
        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(input_ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        bos_token = "bos"
        bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"
        input_ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        input_ids = tokenizer.encode(text)
        self.assertEqual(input_ids, [31957, 250, 1345, 9, 10, 4758])
| 228 | 1 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)
    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)
            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                tokenizer = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
    def test_model_name_edge_cases_in_mappings(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)
        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        config = get_tokenizer_config("bert-base-cased")
        config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})
        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
UpperCAmelCase : Optional[Any] =CustomTokenizer.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCAmelCase : Union[str, Any] =AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , snake_case__ )
# Can register in two steps
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
snake_case__ , slow_tokenizer_class=snake_case__ , fast_tokenizer_class=snake_case__ )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
# We pass through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : Optional[int] =BertTokenizerFast.from_pretrained(snake_case__ )
bert_tokenizer.save_pretrained(snake_case__ )
UpperCAmelCase : List[str] =CustomTokenizerFast.from_pretrained(snake_case__ )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCAmelCase : str =AutoTokenizer.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
UpperCAmelCase : int =AutoTokenizer.from_pretrained(snake_case__ , use_fast=snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaises(snake_case__ ):
UpperCAmelCase : List[Any] =AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
UpperCAmelCase : Tuple =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ )
UpperCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCAmelCase : str =AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase : Any =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ )
UpperCAmelCase : List[Any] =AutoTokenizer.from_pretrained(snake_case__ , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] = False
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : Tuple = NewTokenizer
__lowerCamelCase : List[Any] = False
try:
AutoConfig.register('''custom''' , snake_case__ )
AutoTokenizer.register(snake_case__ , slow_tokenizer_class=snake_case__ )
AutoTokenizer.register(snake_case__ , fast_tokenizer_class=snake_case__ )
# If trust_remote_code is not set, the default is to use the local classes
UpperCAmelCase : Any =AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase : Optional[Any] =AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase : str =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase : Optional[int] =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase : Tuple =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase : Union[str, Any] =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : List[Any] =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase : Tuple =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=snake_case__ , use_fast=snake_case__ )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase : int =AutoTokenizer.from_pretrained('''bert-base''' )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
snake_case__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase : Union[str, Any] =AutoTokenizer.from_pretrained(snake_case__ , revision='''aaaaaa''' )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : str =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase : Tuple =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
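# A hedged sketch of the registration pattern the tests above exercise. The class
# names below are hypothetical placeholders, not the actual test fixtures:
#
#     from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer
#
#     class MyConfig(PretrainedConfig):
#         model_type = "my-model"
#
#     class MyTokenizer(PreTrainedTokenizer):
#         pass
#
#     AutoConfig.register("my-model", MyConfig)
#     AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
#
# After registration, AutoTokenizer.from_pretrained can resolve checkpoints whose
# config is a MyConfig, which is what the save/reload assertions above check.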
| 353 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''vocab.json'''}
__snake_case = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
__snake_case = {'''mgp-str''': 27}
class __snake_case ( lowerCamelCase__ ):
__lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , snake_case__ , snake_case__="[GO]" , snake_case__="[GO]" , snake_case__="[s]" , snake_case__="[GO]" , **snake_case__ ) -> Any:
'''simple docstring'''
super().__init__(
unk_token=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , pad_token=snake_case__ , **snake_case__ , )
with open(snake_case__ , encoding='''utf-8''' ) as vocab_handle:
UpperCAmelCase : int =json.load(snake_case__ )
UpperCAmelCase : List[str] ={v: k for k, v in self.vocab.items()}
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return len(self.vocab )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self , snake_case__ ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : List[str] =[]
for s in text:
char_tokens.extend(snake_case__ )
return char_tokens
def UpperCAmelCase__ ( self , snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
return self.vocab.get(snake_case__ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self , snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(snake_case__ )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(snake_case__ ) )
return
UpperCAmelCase : List[Any] =os.path.join(
snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + '''\n''' )
return (vocab_file,)
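# The class above is a plain character-level tokenizer: tokenization splits the
# input into single characters and vocab.json maps each character to an id. A
# hedged usage sketch (the checkpoint id comes from PRETRAINED_VOCAB_FILES_MAP above):
#
#     from transformers import MgpstrTokenizer
#     tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#     tok.tokenize("abc")                         # -> ["a", "b", "c"]
#     tok.convert_tokens_to_ids(["a", "b", "c"])  # per-character ids from vocab.json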
| 78 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=False ) -> List[Any]:
'''simple docstring'''
A__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# for the base model only, strip the leading "vit." prefix from all keys that start with "vit"
A__ = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> Optional[int]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
A__ = ''
else:
A__ = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
A__ = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = in_proj_bias[-config.hidden_size :]
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
'''simple docstring'''
A__ = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]:
'''simple docstring'''
A__ = dct.pop(SCREAMING_SNAKE_CASE__ )
A__ = val
def _snake_case( ) -> Any:
'''simple docstring'''
A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Dict:
'''simple docstring'''
A__ = ViTConfig()
A__ = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
A__ = True
A__ = int(vit_name[-12:-10] )
A__ = int(vit_name[-9:-6] )
else:
A__ = 1000
A__ = 'huggingface/label-files'
A__ = 'imagenet-1k-id2label.json'
A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
A__ = int(vit_name[-6:-4] )
A__ = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
A__ = 192
A__ = 768
A__ = 12
A__ = 3
elif vit_name[9:].startswith('small' ):
A__ = 384
A__ = 1536
A__ = 12
A__ = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
A__ = 768
A__ = 2304
A__ = 8
A__ = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
A__ = 1024
A__ = 4096
A__ = 24
A__ = 16
elif vit_name[4:].startswith('huge' ):
A__ = 1280
A__ = 5120
A__ = 32
A__ = 16
# load original model from timm
A__ = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ = timm_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE__ )
A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
A__ = ViTModel(SCREAMING_SNAKE_CASE__ ).eval()
else:
A__ = ViTForImageClassification(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
A__ = DeiTImageProcessor(size=config.image_size )
else:
A__ = ViTImageProcessor(size=config.image_size )
A__ = image_processor(images=prepare_img() , return_tensors='pt' )
A__ = encoding['pixel_values']
A__ = model(SCREAMING_SNAKE_CASE__ )
if base_model:
A__ = timm_model.forward_features(SCREAMING_SNAKE_CASE__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.pooler_output , atol=1E-3 )
else:
A__ = timm_model(SCREAMING_SNAKE_CASE__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.logits , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowercase_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
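# Example invocation of this conversion script (the script filename and output
# path are placeholders; both flags are defined in the argparse block above):
#
#     python convert_vit_timm_to_pytorch.py \
#         --vit_name vit_base_patch16_224 \
#         --pytorch_dump_folder_path ./vit-base-patch16-224
#
# The script loads the timm checkpoint, renames and splits the weights, verifies
# the outputs against timm with torch.allclose, then saves the model and processor.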
| 7 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str = "cpu" , SCREAMING_SNAKE_CASE__ : Union[str, None] = None ) -> None:
'''simple docstring'''
A__ = torch.load(SCREAMING_SNAKE_CASE__ , map_location=SCREAMING_SNAKE_CASE__ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
A__ = v.half()
if save_path is None: # overwrite src_path
A__ = src_path
torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
fire.Fire(convert)
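# Example invocation via python-fire (the argument names src_path, map_location
# and save_path are inferred from the function body above and may differ from the
# original; the file paths are placeholders):
#
#     python convert_to_fp16.py pytorch_model.bin --map_location cpu --save_path model_fp16.bin
#
# Omitting --save_path overwrites the source checkpoint in place, as handled above.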
| 7 | 1 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ) -> Tuple:
model.train()
snake_case_ = model(__lowerCamelCase )
snake_case_ = F.mse_loss(__lowerCamelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__lowerCamelCase )
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> int:
set_seed(42 )
snake_case_ = RegressionModel()
snake_case_ = deepcopy(__lowerCamelCase )
snake_case_ = RegressionDataset(length=80 )
snake_case_ = DataLoader(__lowerCamelCase , batch_size=16 )
model.to(accelerator.device )
if sched:
snake_case_ = AdamW(params=model.parameters() , lr=1E-3 )
snake_case_ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
snake_case_ = LambdaLR(__lowerCamelCase , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.65 )
snake_case_ = LambdaLR(__lowerCamelCase , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.65 )
# Make a copy of `model`
if sched:
snake_case_ = accelerator.prepare(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
snake_case_ = accelerator.prepare(__lowerCamelCase , __lowerCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _a ( _SCREAMING_SNAKE_CASE ) -> Tuple:
# Test that on a single CPU or GPU the context manager does nothing
snake_case_ = get_training_setup(__lowerCamelCase )
# Use a single batch
snake_case_ = next(iter(__lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground-truth step without DDP
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCamelCase ):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
# Sync grads
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
snake_case_ = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
def _a ( _SCREAMING_SNAKE_CASE ) -> int:
# Test that the context manager behaves properly in a distributed setup
snake_case_ = get_training_setup(__lowerCamelCase )
# Use a single batch
snake_case_ = next(iter(__lowerCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground-truth step without DDP
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCamelCase ):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
# Sync grads
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
snake_case_ = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
def _a ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ) -> Tuple:
snake_case_ = Accelerator(
split_batches=__lowerCamelCase , dispatch_batches=__lowerCamelCase , gradient_accumulation_steps=2 )
# Test that the context manager behaves properly
snake_case_ = get_training_setup(__lowerCamelCase )
for iteration, batch in enumerate(__lowerCamelCase ):
snake_case_ = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground-truth step without DDP
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__lowerCamelCase ):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
snake_case_ = ddp_input[torch.randperm(len(__lowerCamelCase ) )]
GradientState._reset_state()
def _a ( _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ) -> List[str]:
snake_case_ = Accelerator(
split_batches=__lowerCamelCase , dispatch_batches=__lowerCamelCase , gradient_accumulation_steps=2 )
# Test that the context manager behaves properly
snake_case_ = get_training_setup(__lowerCamelCase , __lowerCamelCase )
for iteration, batch in enumerate(__lowerCamelCase ):
snake_case_ = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground-truth step without DDP
model.train()
ddp_model.train()
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__lowerCamelCase ):
step_model(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n"""
snake_case_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def _a ( ) -> int:
snake_case_ = Accelerator()
snake_case_ = RegressionDataset(length=80 )
snake_case_ = DataLoader(__lowerCamelCase , batch_size=16 )
snake_case_ = RegressionDataset(length=96 )
snake_case_ = DataLoader(__lowerCamelCase , batch_size=16 )
snake_case_ = accelerator.prepare(__lowerCamelCase , __lowerCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase )
if iteration < len(__lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__lowerCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase )
if batch_num < len(__lowerCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _a ( ) -> List[Any]:
snake_case_ = Accelerator()
snake_case_ = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(__lowerCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(__lowerCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(__lowerCamelCase , __lowerCamelCase )
# Currently breaks on torch 2.0+; the cause still needs to be investigated
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(__lowerCamelCase , __lowerCamelCase )
def _a ( _SCREAMING_SNAKE_CASE ) -> str:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
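# The canonical gradient-accumulation pattern these tests verify, as a minimal
# sketch (model, optimizer and dataloader construction omitted):
#
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch, target in dataloader:
#         with accelerator.accumulate(model):
#             loss = F.mse_loss(model(batch), target)
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()
#
# Inside accelerator.accumulate, gradient synchronization across processes is
# skipped until the accumulation boundary, which is what the allclose assertions
# above verify iteration by iteration.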
| 351 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> list[int]:
snake_case_ = len(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , _SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
snake_case_ , snake_case_ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = input('Enter numbers separated by a comma:\n').strip()
__SCREAMING_SNAKE_CASE : List[str] = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
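# Example: each outer pass i leaves the i-th smallest value at index i, so
# exchange_sort([4, 2, 1, 3]) returns [1, 2, 3, 4]. Like selection sort, it
# performs O(n^2) comparisons regardless of the input order.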
| 233 | 0 |
'''simple docstring'''
from __future__ import annotations
import queue
class UpperCAmelCase :
def __init__( self :Tuple , lowercase_ :Any )-> Optional[int]:
A__ = data
A__ = None
A__ = None
def UpperCamelCase ( ):
print("\n********Press N to stop entering at any point of time********\n" )
A__ = input("Enter the value of the root node: " ).strip().lower()
A__ = queue.Queue()
A__ = TreeNode(int(_SCREAMING_SNAKE_CASE ) )
q.put(_SCREAMING_SNAKE_CASE )
while not q.empty():
A__ = q.get()
A__ = F"Enter the left node of {node_found.data}: "
A__ = input(_SCREAMING_SNAKE_CASE ).strip().lower() or "n"
if check == "n":
return tree_node
A__ = TreeNode(int(_SCREAMING_SNAKE_CASE ) )
A__ = left_node
q.put(_SCREAMING_SNAKE_CASE )
A__ = F"Enter the right node of {node_found.data}: "
A__ = input(_SCREAMING_SNAKE_CASE ).strip().lower() or "n"
if check == "n":
return tree_node
A__ = TreeNode(int(_SCREAMING_SNAKE_CASE ) )
A__ = right_node
q.put(_SCREAMING_SNAKE_CASE )
raise  # unreachable in practice (the loop always returns); kept so every path appears to return
def UpperCamelCase ( _lowerCamelCase : Optional[Any] ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
print(node.data , end="," )
pre_order(node.left )
pre_order(node.right )
def UpperCamelCase ( _lowerCamelCase : Optional[int] ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
in_order(node.left )
print(node.data , end="," )
in_order(node.right )
def UpperCamelCase ( _lowerCamelCase : Optional[int] ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end="," )
def UpperCamelCase ( _lowerCamelCase : Any ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
A__ = queue.Queue()
q.put(_SCREAMING_SNAKE_CASE )
while not q.empty():
A__ = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def UpperCamelCase ( _lowerCamelCase : int ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
A__ = queue.Queue()
q.put(_SCREAMING_SNAKE_CASE )
while not q.empty():
A__ = []
while not q.empty():
A__ = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_SCREAMING_SNAKE_CASE )
def UpperCamelCase ( _lowerCamelCase : Tuple ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
A__ = []
A__ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end="," )
stack.append(_SCREAMING_SNAKE_CASE )
A__ = n.left
# end of while means current node doesn't have left child
A__ = stack.pop()
# start to traverse its right child
A__ = n.right
def UpperCamelCase ( _lowerCamelCase : List[str] ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
A__ = []
A__ = node
while n or stack:
while n:
stack.append(_SCREAMING_SNAKE_CASE )
A__ = n.left
A__ = stack.pop()
print(n.data , end="," )
A__ = n.right
def UpperCamelCase ( _lowerCamelCase : List[Any] ):
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not node:
return
A__, A__ = [], []
A__ = node
stacka.append(_SCREAMING_SNAKE_CASE )
while stacka: # to find the reversed order of post order, store it in stack2
A__ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_SCREAMING_SNAKE_CASE )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end="," )
def UpperCamelCase ( _lowerCamelCase : Optional[int] = "" , _lowerCamelCase : Optional[int]=50 , _lowerCamelCase : Tuple="*" ):
if not s:
return "\n" + width * char
A__, A__ = divmod(width - len(_SCREAMING_SNAKE_CASE ) - 2 , 2 )
return F"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
__lowerCAmelCase : TreeNode =build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
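# Quick sanity example for the traversals above, on the tree
#       1
#      / \
#     2   3
# pre-order:   1,2,3
# in-order:    2,1,3
# post-order:  2,3,1
# level order: 1,2,3
# (the iterative versions print the same sequences as their recursive counterparts)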
| 237 |
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
if num < 0:
return False
snake_case_ = num
snake_case_ = 0
while num > 0:
snake_case_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
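# Examples: 121 reverses to 121, so the check returns True; 123 reverses to 321,
# so it returns False; negative inputs are rejected up front (-121 -> False).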
| 347 | 0 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = "PoolFormerConfig"
# Base docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = [1, 5_12, 7, 7]
# Image classification docstring
__UpperCAmelCase = "sail/poolformer_s12"
__UpperCAmelCase = "tabby, tabby cat"
__UpperCAmelCase = [
"sail/poolformer_s12",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def A__ ( __lowerCamelCase, __lowerCamelCase = 0.0, __lowerCamelCase = False ):
if drop_prob == 0.0 or not training:
return input
SCREAMING_SNAKE_CASE_ = 1 - drop_prob
SCREAMING_SNAKE_CASE_ = (input.shape[0],) + (1,) * (input.ndim - 1) # works with tensors of any dimensionality, not just 2D ConvNets
SCREAMING_SNAKE_CASE_ = keep_prob + torch.rand(__lowerCamelCase, dtype=input.dtype, device=input.device )
random_tensor.floor_() # binarize
SCREAMING_SNAKE_CASE_ = input.div(__lowerCamelCase ) * random_tensor
return output
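# Worked example of the stochastic-depth math above: with drop_prob=0.2 and a
# batch of 4 images, keep_prob is 0.8 and random_tensor has shape (4, 1, 1, 1);
# after floor_() each sample independently keeps a 1 with probability 0.8.
# Survivors are scaled by 1/keep_prob via input.div(keep_prob), so the expected
# value of the output matches the input and inference needs no rescaling.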
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _A = None ) -> None:
super().__init__()
SCREAMING_SNAKE_CASE_ = drop_prob
def _UpperCamelCase ( self , _A ) -> torch.Tensor:
return drop_path(_A , self.drop_prob , self.training )
def _UpperCamelCase ( self ) -> str:
return "p={}".format(self.drop_prob )
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _A , _A , _A , _A , _A , _A=None ) -> List[str]:
super().__init__()
SCREAMING_SNAKE_CASE_ = patch_size if isinstance(_A , collections.abc.Iterable ) else (patch_size, patch_size)
SCREAMING_SNAKE_CASE_ = stride if isinstance(_A , collections.abc.Iterable ) else (stride, stride)
SCREAMING_SNAKE_CASE_ = padding if isinstance(_A , collections.abc.Iterable ) else (padding, padding)
SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , kernel_size=_A , stride=_A , padding=_A )
SCREAMING_SNAKE_CASE_ = norm_layer(_A ) if norm_layer else nn.Identity()
def _UpperCamelCase ( self , _A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = self.projection(_A )
SCREAMING_SNAKE_CASE_ = self.norm(_A )
return embeddings
class UpperCamelCase__ ( nn.GroupNorm ):
"""simple docstring"""
def __init__( self , _A , **_A ) -> Dict:
super().__init__(1 , _A , **_A )
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _A ) -> Optional[int]:
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.AvgPoolad(_A , stride=1 , padding=pool_size // 2 , count_include_pad=_A )
def _UpperCamelCase ( self , _A ) -> Union[str, Any]:
return self.pool(_A ) - hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _A , _A , _A , _A ) -> List[Any]:
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1 )
SCREAMING_SNAKE_CASE_ = nn.Convad(_A , _A , 1 )
SCREAMING_SNAKE_CASE_ = PoolFormerDropPath(_A )
if isinstance(config.hidden_act , _A ):
SCREAMING_SNAKE_CASE_ = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE_ = config.hidden_act
def _UpperCamelCase ( self , _A ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.conva(_A )
SCREAMING_SNAKE_CASE_ = self.act_fn(_A )
SCREAMING_SNAKE_CASE_ = self.drop(_A )
SCREAMING_SNAKE_CASE_ = self.conva(_A )
SCREAMING_SNAKE_CASE_ = self.drop(_A )
return hidden_states
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _A , _A , _A , _A , _A , _A ) -> Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE_ = PoolFormerPooling(_A )
SCREAMING_SNAKE_CASE_ = PoolFormerOutput(_A , _A , _A , _A )
SCREAMING_SNAKE_CASE_ = PoolFormerGroupNorm(_A )
SCREAMING_SNAKE_CASE_ = PoolFormerGroupNorm(_A )
# Stochastic depth (drop path), useful as regularization when training deep networks
SCREAMING_SNAKE_CASE_ = PoolFormerDropPath(_A ) if drop_path > 0.0 else nn.Identity()
SCREAMING_SNAKE_CASE_ = config.use_layer_scale
if config.use_layer_scale:
SCREAMING_SNAKE_CASE_ = nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) , requires_grad=_A )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
config.layer_scale_init_value * torch.ones((_A) ) , requires_grad=_A )
def _UpperCamelCase ( self , _A ) -> Any:
if self.use_layer_scale:
SCREAMING_SNAKE_CASE_ = self.pooling(self.before_norm(_A ) )
SCREAMING_SNAKE_CASE_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
SCREAMING_SNAKE_CASE_ = hidden_states + self.drop_path(_A )
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = self.output(self.after_norm(_A ) )
SCREAMING_SNAKE_CASE_ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
SCREAMING_SNAKE_CASE_ = hidden_states + self.drop_path(_A )
SCREAMING_SNAKE_CASE_ = (output,) + outputs
return outputs
else:
SCREAMING_SNAKE_CASE_ = self.drop_path(self.pooling(self.before_norm(_A ) ) )
# First residual connection
SCREAMING_SNAKE_CASE_ = pooling_output + hidden_states
SCREAMING_SNAKE_CASE_ = ()
# Second residual connection inside the PoolFormerOutput block
SCREAMING_SNAKE_CASE_ = self.drop_path(self.output(self.after_norm(_A ) ) )
SCREAMING_SNAKE_CASE_ = hidden_states + layer_output
SCREAMING_SNAKE_CASE_ = (output,) + outputs
return outputs
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _A ) -> Optional[int]:
super().__init__()
SCREAMING_SNAKE_CASE_ = config
# stochastic depth decay rule
SCREAMING_SNAKE_CASE_ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
SCREAMING_SNAKE_CASE_ = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
SCREAMING_SNAKE_CASE_ = nn.ModuleList(_A )
# Transformer blocks
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
SCREAMING_SNAKE_CASE_ = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(_A ) )
SCREAMING_SNAKE_CASE_ = nn.ModuleList(_A )
def _UpperCamelCase ( self , _A , _A=False , _A=True ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = () if output_hidden_states else None
SCREAMING_SNAKE_CASE_ = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = layers
# Get patch embeddings from hidden_states
SCREAMING_SNAKE_CASE_ = embedding_layer(_A )
# Send the embeddings through the blocks
for _, blk in enumerate(_A ):
SCREAMING_SNAKE_CASE_ = blk(_A )
SCREAMING_SNAKE_CASE_ = layer_outputs[0]
if output_hidden_states:
SCREAMING_SNAKE_CASE_ = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ =PoolFormerConfig
UpperCAmelCase_ ="poolformer"
UpperCAmelCase_ ="pixel_values"
UpperCAmelCase_ =True
def _UpperCamelCase ( self , _A ) -> List[Any]:
if isinstance(_A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _UpperCamelCase ( self , _A , _A=False ) -> Any:
if isinstance(_A , _A ):
SCREAMING_SNAKE_CASE_ = value
__UpperCAmelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__UpperCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , __SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , _A ) -> Union[str, Any]:
super().__init__(_A )
SCREAMING_SNAKE_CASE_ = config
SCREAMING_SNAKE_CASE_ = PoolFormerEncoder(_A )
# Initialize weights and apply final processing
self.post_init()
def _UpperCamelCase ( self ) -> int:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _UpperCamelCase ( self , _A = None , _A = None , _A = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
SCREAMING_SNAKE_CASE_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
SCREAMING_SNAKE_CASE_ = self.encoder(
_A , output_hidden_states=_A , return_dict=_A , )
SCREAMING_SNAKE_CASE_ = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_A , hidden_states=encoder_outputs.hidden_states , )
class UpperCamelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , _A ) -> List[str]:
super().__init__()
SCREAMING_SNAKE_CASE_ = nn.Linear(config.hidden_size , config.hidden_size )
def _UpperCamelCase ( self , _A ) -> Any:
SCREAMING_SNAKE_CASE_ = self.dense(_A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , __SCREAMING_SNAKE_CASE , )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , _A ) -> List[str]:
super().__init__(_A )
SCREAMING_SNAKE_CASE_ = config.num_labels
SCREAMING_SNAKE_CASE_ = PoolFormerModel(_A )
# Final norm
SCREAMING_SNAKE_CASE_ = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
SCREAMING_SNAKE_CASE_ = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _UpperCamelCase ( self , _A = None , _A = None , _A = None , _A = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
SCREAMING_SNAKE_CASE_ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ = self.poolformer(
_A , output_hidden_states=_A , return_dict=_A , )
SCREAMING_SNAKE_CASE_ = outputs[0]
SCREAMING_SNAKE_CASE_ = self.classifier(self.norm(_A ).mean([-2, -1] ) )
SCREAMING_SNAKE_CASE_ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_ = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE_ = '''single_label_classification'''
else:
SCREAMING_SNAKE_CASE_ = '''multi_label_classification'''
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE_ = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE_ = loss_fct(_A , _A )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE_ = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE_ = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE_ = loss_fct(_A , _A )
if not return_dict:
SCREAMING_SNAKE_CASE_ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
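# A hedged end-to-end usage sketch for the classification model defined above
# (the checkpoint id comes from _IMAGE_CLASS_CHECKPOINT; `image` is a placeholder
# PIL image):
#
#     from transformers import AutoImageProcessor, PoolFormerForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#     inputs = processor(images=image, return_tensors="pt")
#     logits = model(**inputs).logits
#     predicted_label = model.config.id2label[logits.argmax(-1).item()]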
| 257 |
def A__ ( __lowerCamelCase ):
return sum(i for i in range(1, number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
__UpperCAmelCase = int(input("Enter number: ").strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 257 | 1 |
from __future__ import annotations
import numpy as np
def _a ( SCREAMING_SNAKE_CASE_ : np.ndarray ):
__lowerCAmelCase , __lowerCAmelCase = np.shape(SCREAMING_SNAKE_CASE_ )
if rows != columns:
__lowerCAmelCase = (
"'table' has to be of square shaped array but got a "
F"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = np.zeros((rows, columns) )
__lowerCAmelCase = np.zeros((rows, columns) )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
__lowerCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
__lowerCAmelCase = (table[i][j] - total) / upper[j][j]
__lowerCAmelCase = 1
for j in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowerCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(SCREAMING_SNAKE_CASE_ ) )
__lowerCAmelCase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
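# Worked 2x2 example of the Doolittle decomposition (unit diagonal on L) that
# this function computes:
#
#     A = [[2, 1],      L = [[1, 0],      U = [[2, 1],
#          [4, 3]]           [2, 1]]           [0, 1]]
#
# Check: L @ U reproduces A, since 2*2 = 4 and 2*1 + 1*1 = 3.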
| 92 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class a__ ( snake_case__ , unittest.TestCase ):
_a : Optional[Any] = DebertaVaTokenizer
_a : Optional[Any] = DebertaVaTokenizerFast
_a : List[str] = True
_a : Optional[Any] = True
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCAmelCase = DebertaVaTokenizer(_A , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = "this is a test"
__lowerCAmelCase = "this is a test"
return input_text, output_text
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(_A ) , 3_0_0_0_1 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = " \tHeLLo!how \n Are yoU? "
__lowerCAmelCase = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
__lowerCAmelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = tokenizer.encode(_A )
__lowerCAmelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = "This is a test"
__lowerCAmelCase = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
__lowerCAmelCase = ["▁", "T", "his", "▁is", "▁a", "▁test"]
__lowerCAmelCase = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
__lowerCAmelCase = DebertaVaTokenizer(_A , keep_accents=_A )
__lowerCAmelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
__lowerCAmelCase = "I was born in 92000, and this is falsé."
__lowerCAmelCase = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
__lowerCAmelCase = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
__lowerCAmelCase = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
__lowerCAmelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = DebertaVaTokenizer(_A )
__lowerCAmelCase = tokenizer.encode("sequence builders" )
__lowerCAmelCase = tokenizer.encode("multi-sequence build" )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 92 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = KandinskyVaaPriorPipeline
lowercase = ["prompt"]
lowercase = ["prompt", "negative_prompt"]
lowercase = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
lowercase = False
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return 32
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return 32
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
return 100
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__UpperCAmelCase )
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
__UpperCamelCase = PriorTransformer(**__UpperCAmelCase )
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0; set clip_std to 1 so it won't
__UpperCamelCase = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__UpperCamelCase = CLIPVisionModelWithProjection(__UpperCAmelCase )
return model
@property
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = CLIPImageProcessor(
crop_size=224 , do_center_crop=__UpperCAmelCase , do_normalize=__UpperCAmelCase , do_resize=__UpperCAmelCase , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
return image_processor
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.dummy_prior
__UpperCamelCase = self.dummy_image_encoder
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = self.dummy_tokenizer
__UpperCamelCase = self.dummy_image_processor
__UpperCamelCase = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__UpperCAmelCase , clip_sample_range=10.0 , )
__UpperCamelCase = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ):
'''simple docstring'''
if str(__UpperCAmelCase ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(__UpperCAmelCase )
else:
__UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
__UpperCamelCase = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = 'cpu'
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**__UpperCAmelCase )
__UpperCamelCase = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__UpperCamelCase = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
__UpperCamelCase = output.image_embeds
__UpperCamelCase = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
__UpperCamelCase = image[0, -10:]
__UpperCamelCase = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
__UpperCamelCase = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = torch_device == 'cpu'
__UpperCamelCase = True
__UpperCamelCase = False
self._test_inference_batch_single_identical(
test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
@skip_mps
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = torch_device == 'cpu'
__UpperCamelCase = False
self._test_attention_slicing_forward_pass(
test_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
| 356 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(f'''{solution() = }''')
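# A quick sanity check (hedged, taken from the Project Euler 34 statement):
# sum_of_digit_factorial(145) == 1! + 4! + 5! == 1 + 24 + 120 == 145,
# so 145 is one of the numbers the solution() sum should include.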
| 263 | 0 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=64 , a_=5 , a_=4 , a_=64 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ):
'''simple docstring'''
__snake_case : List[Any] = parent
__snake_case : Any = batch_size
__snake_case : Union[str, Any] = seq_length
__snake_case : Optional[Any] = is_training
__snake_case : Optional[int] = use_input_mask
__snake_case : Any = use_token_type_ids
__snake_case : Any = use_labels
__snake_case : Tuple = vocab_size
__snake_case : Dict = hidden_size
__snake_case : int = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : List[Any] = intermediate_size
__snake_case : List[str] = hidden_act
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : str = attention_probs_dropout_prob
__snake_case : Tuple = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Tuple = type_sequence_label_size
__snake_case : List[Any] = initializer_range
__snake_case : Any = num_labels
__snake_case : Union[str, Any] = num_choices
__snake_case : Any = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Any = None
if self.use_input_mask:
__snake_case : str = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Optional[int] = None
__snake_case : List[str] = None
__snake_case : int = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : int = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : str = MPNetModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__snake_case : str = model(lowercase_ , lowercase_ )
__snake_case : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = MPNetForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__snake_case : Any = model(
lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = self.num_labels
__snake_case : str = MPNetForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
__snake_case : str = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.num_choices
__snake_case : int = MPNetForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__snake_case : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : List[str] = model(
lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Tuple = self.num_labels
__snake_case : List[Any] = MPNetForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
__snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : str = config_and_inputs
__snake_case : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = MPNetModelTester(self )
__snake_case : Optional[int] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowercase_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
__snake_case : Tuple = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
__snake_case : Dict = model(lowercase_ )[0]
__snake_case : int = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , lowercase_ )
__snake_case : Tuple = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
| 102 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case_ = logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = ["""pixel_values"""]
def __init__( self :int , lowercase_ :bool = True , lowercase_ :Dict[str, int] = None , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :bool = True , lowercase_ :Union[int, float] = 1 / 2_55 , lowercase_ :bool = True , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :bool = True , **lowercase_ :Union[str, Any] , ) -> None:
super().__init__(**lowercase_ )
UpperCAmelCase = size if size is not None else {'height': 3_84, 'width': 3_84}
UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase = do_convert_rgb
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :np.ndarray , lowercase_ :Dict[str, int] , lowercase_ :PILImageResampling = PILImageResampling.BICUBIC , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Any , ) -> np.ndarray:
UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
UpperCAmelCase = (size['height'], size['width'])
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :np.ndarray , lowercase_ :Union[int, float] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[int] , ) -> int:
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase__ ( self :Any , lowercase_ :np.ndarray , lowercase_ :Union[float, List[float]] , lowercase_ :Union[float, List[float]] , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :Optional[Any] , ) -> np.ndarray:
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :ImageInput , lowercase_ :Optional[bool] = None , lowercase_ :Optional[Dict[str, int]] = None , lowercase_ :PILImageResampling = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[float] = None , lowercase_ :Optional[bool] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[float, List[float]]] = None , lowercase_ :Optional[Union[str, TensorType]] = None , lowercase_ :bool = None , lowercase_ :ChannelDimension = ChannelDimension.FIRST , **lowercase_ :Tuple , ) -> BatchFeature:
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ )
UpperCAmelCase = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase = [convert_to_rgb(lowercase_ ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
UpperCAmelCase = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
UpperCAmelCase = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
UpperCAmelCase = BatchFeature(data={'pixel_values': images} , tensor_type=lowercase_ )
return encoded_outputs
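# Hedged usage sketch (names follow the standard transformers image-processor
# API, not anything stated in this file):
#   processor = BlipImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # expected: (1, 3, 384, 384) with the default size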
| 78 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
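# Hedged usage sketch: a string containing whitespace is treated as the prompt
# itself, anything else as a dataset repo id to download from.
#   download_prompt("Translate <<task>> into French", agent_name="my-agent")  # returned verbatim
#   download_prompt(None, agent_name="my-agent", mode="chat")  # fetches chat_prompt_template.txt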
| 370 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
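# With X applied to both qubits before measurement, every shot collapses to
# state |11>, so the expected histogram is {'11': 1000} (hedged: the exact
# output formatting can vary across qiskit versions).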
| 247 | 0 |
"""simple docstring"""
import inspect
import unittest
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Tuple ):
try:
import diffusers # noqa: F401
except ImportError:
assert False
def _lowercase ( self : Optional[Any] ):
import diffusers
from diffusers.dependency_versions_table import deps
__lowercase = inspect.getmembers(diffusers, inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
__lowercase = "k-diffusion"
elif backend == "invisible_watermark":
__lowercase = "invisible-watermark"
assert backend in deps, F"""{backend} is not in the deps table!"""
| 17 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
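# Hedged walk-through for the edges/vertices above: starting at "a", the DFS
# visits c first, then b's children d and e, so topological_sort("a", [], [])
# should return ["c", "d", "e", "b", "a"].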
if __name__ == "__main__":
sort = topological_sort("a", [], [])
print(sort) | 233 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
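# Replacing the module in sys.modules with _LazyModule defers the heavy
# torch/TF imports above until an attribute such as BlipModel is first accessed.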
| 353 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__lowercase = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    """simple docstring"""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
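# Hedged usage note: instantiating the deprecated class only emits the
# FutureWarning above and then behaves exactly like DPTImageProcessor.
#   feature_extractor = DPTFeatureExtractor()  # warns, otherwise identical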
| 226 | 0 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
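# Hedged example: is_spain_national_id("12345678Z") is True, because
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z"; swapping the final
# letter for any other one makes the same check return False.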
if __name__ == "__main__":
import doctest
doctest.testmod()
| 257 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : jnp.ndarray
UpperCamelCase__ : jnp.ndarray
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
UpperCamelCase__ : int
UpperCamelCase__ : Tuple[int] = (16, 32, 96, 256)
UpperCamelCase__ : jnp.dtype = jnp.floataa
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__SCREAMING_SNAKE_CASE = []
for i in range(len(self.block_out_channels ) - 1 ):
__SCREAMING_SNAKE_CASE = self.block_out_channels[i]
__SCREAMING_SNAKE_CASE = self.block_out_channels[i + 1]
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
__SCREAMING_SNAKE_CASE = blocks
__SCREAMING_SNAKE_CASE = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.conv_in(_A )
__SCREAMING_SNAKE_CASE = nn.silu(_A )
for block in self.blocks:
__SCREAMING_SNAKE_CASE = block(_A )
__SCREAMING_SNAKE_CASE = nn.silu(_A )
__SCREAMING_SNAKE_CASE = self.conv_out(_A )
return embedding
@flax_register_to_config
class UpperCAmelCase_ ( nn.Module , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : int = 32
UpperCamelCase__ : int = 4
UpperCamelCase__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCamelCase__ : Union[bool, Tuple[bool]] = False
UpperCamelCase__ : Tuple[int] = (320, 640, 1280, 1280)
UpperCamelCase__ : int = 2
UpperCamelCase__ : Union[int, Tuple[int]] = 8
UpperCamelCase__ : Optional[Union[int, Tuple[int]]] = None
UpperCamelCase__ : int = 1280
UpperCamelCase__ : float = 0.0
UpperCamelCase__ : bool = False
UpperCamelCase__ : jnp.dtype = jnp.floataa
UpperCamelCase__ : bool = True
UpperCamelCase__ : int = 0
UpperCamelCase__ : str = "rgb"
UpperCamelCase__ : Tuple[int] = (16, 32, 96, 256)
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (1, self.in_channels, self.sample_size, self.sample_size)
__SCREAMING_SNAKE_CASE = jnp.zeros(_A , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = jnp.ones((1,) , dtype=jnp.intaa )
__SCREAMING_SNAKE_CASE = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = (1, 3, self.sample_size * 8, self.sample_size * 8)
__SCREAMING_SNAKE_CASE = jnp.zeros(_A , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = jax.random.split(_A )
__SCREAMING_SNAKE_CASE = {'params': params_rng, 'dropout': dropout_rng}
return self.init(_A , _A , _A , _A , _A )["params"]
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.block_out_channels
__SCREAMING_SNAKE_CASE = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__SCREAMING_SNAKE_CASE = self.num_attention_heads or self.attention_head_dim
# input
__SCREAMING_SNAKE_CASE = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__SCREAMING_SNAKE_CASE = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__SCREAMING_SNAKE_CASE = FlaxTimestepEmbedding(_A , dtype=self.dtype )
__SCREAMING_SNAKE_CASE = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__SCREAMING_SNAKE_CASE = self.only_cross_attention
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE = (num_attention_heads,) * len(self.down_block_types )
# down
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = block_out_channels[0]
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
for i, down_block_type in enumerate(self.down_block_types ):
__SCREAMING_SNAKE_CASE = output_channel
__SCREAMING_SNAKE_CASE = block_out_channels[i]
__SCREAMING_SNAKE_CASE = i == len(_A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__SCREAMING_SNAKE_CASE = FlaxCrossAttnDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
__SCREAMING_SNAKE_CASE = FlaxDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_A )
for _ in range(self.layers_per_block ):
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
if not is_final_block:
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
__SCREAMING_SNAKE_CASE = down_blocks
__SCREAMING_SNAKE_CASE = controlnet_down_blocks
# mid
__SCREAMING_SNAKE_CASE = block_out_channels[-1]
__SCREAMING_SNAKE_CASE = FlaxUNetMidBlockaDCrossAttn(
in_channels=_A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A , _A , _A , _A , _A = 1.0 , _A = True , _A = False , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__SCREAMING_SNAKE_CASE = jnp.flip(_A , axis=1 )
# 1. time
if not isinstance(_A , jnp.ndarray ):
__SCREAMING_SNAKE_CASE = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE = timesteps.astype(dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = jnp.expand_dims(_A , 0 )
__SCREAMING_SNAKE_CASE = self.time_proj(_A )
__SCREAMING_SNAKE_CASE = self.time_embedding(_A )
# 2. pre-process
__SCREAMING_SNAKE_CASE = jnp.transpose(_A , (0, 2, 3, 1) )
__SCREAMING_SNAKE_CASE = self.conv_in(_A )
__SCREAMING_SNAKE_CASE = jnp.transpose(_A , (0, 2, 3, 1) )
__SCREAMING_SNAKE_CASE = self.controlnet_cond_embedding(_A )
sample += controlnet_cond
# 3. down
__SCREAMING_SNAKE_CASE = (sample,)
for down_block in self.down_blocks:
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = down_block(_A , _A , _A , deterministic=not train )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = down_block(_A , _A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__SCREAMING_SNAKE_CASE = self.mid_block(_A , _A , _A , deterministic=not train )
# 5. controlnet blocks
__SCREAMING_SNAKE_CASE = ()
for down_block_res_sample, controlnet_block in zip(_A , self.controlnet_down_blocks ):
__SCREAMING_SNAKE_CASE = controlnet_block(_A )
controlnet_down_block_res_samples += (down_block_res_sample,)
__SCREAMING_SNAKE_CASE = controlnet_down_block_res_samples
__SCREAMING_SNAKE_CASE = self.controlnet_mid_block(_A )
# 6. scaling
__SCREAMING_SNAKE_CASE = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_A , mid_block_res_sample=_A )
| 257 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    pass  # Put your code here...
def random_letters(chars_incl, i):
    pass  # Put your code here...
def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
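# Hedged example: is_strong_password("Hwea7$2!") is True (upper, lower, digit
# and punctuation at length 8), while is_strong_password("sheep") fails the
# minimum-length check immediately.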
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
| 233 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl, i):
    pass  # Put your code here...
def random_letters(chars_incl, i):
    pass  # Put your code here...
def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
| 233 | 1 |
def solution(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
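# Hedged check against the Project Euler 6 example: for n = 10 the square of
# the sum is 55**2 == 3025 and the sum of the squares is 385, so
# solution(10) == 2640.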
if __name__ == "__main__":
print(f"{solution() = }")
| 149 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))
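# In effect this renames the tied decoder weight ("lm_head.decoder.weight")
# to the name transformers' GPT-2 expects ("lm_head.weight") before
# re-saving the state dict under WEIGHTS_NAME.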
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 263 | 0 |
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args() -> Union[str, Any]:
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--model_ckpt''' , type=__lowerCamelCase , default='''microsoft/unixcoder-base-nine''' )
parser.add_argument('''--num_epochs''' , type=__lowerCamelCase , default=5 )
parser.add_argument('''--batch_size''' , type=__lowerCamelCase , default=6 )
parser.add_argument('''--gradient_accumulation_steps''' , type=__lowerCamelCase , default=1 )
parser.add_argument('''--freeze''' , type=__lowerCamelCase , default=__lowerCamelCase )
parser.add_argument('''--learning_rate''' , type=__lowerCamelCase , default=5E-4 )
parser.add_argument('''--seed''' , type=__lowerCamelCase , default=0 )
parser.add_argument('''--lr_scheduler_type''' , type=__lowerCamelCase , default='''cosine''' )
parser.add_argument('''--num_warmup_steps''' , type=__lowerCamelCase , default=10 )
parser.add_argument('''--weight_decay''' , type=__lowerCamelCase , default=0.01 )
parser.add_argument('''--output_dir''' , type=__lowerCamelCase , default='''./results''' )
return parser.parse_args()
UpperCAmelCase__ = load('accuracy')
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> int:
_snake_case , _snake_case = eval_pred
_snake_case = np.argmax(__lowerCamelCase , axis=1 )
return metric.compute(predictions=__lowerCamelCase , references=__lowerCamelCase )
class lowerCAmelCase__ ( A_ ):
def __init__( self : Optional[int] , _lowerCamelCase : Union[str, Any] ):
super().__init__()
_snake_case = trainer
def lowercase ( self : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , **_lowerCamelCase : Union[str, Any] ):
if control.should_evaluate:
_snake_case = deepcopy(_lowerCamelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
return control_copy
def main() -> Tuple:
_snake_case = get_args()
set_seed(args.seed )
_snake_case = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
_snake_case = dataset.train_test_split(test_size=0.2 )
_snake_case = train_test['''test'''].train_test_split(test_size=0.5 )
_snake_case = DatasetDict(
{
'''train''': train_test['''train'''],
'''test''': test_validation['''train'''],
'''valid''': test_validation['''test'''],
} )
print('''Loading tokenizer and model''' )
_snake_case = AutoTokenizer.from_pretrained(args.model_ckpt )
_snake_case = tokenizer.eos_token
_snake_case = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
_snake_case = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
_snake_case = False
_snake_case = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
def tokenize(__lowerCamelCase : List[str] ):
_snake_case = tokenizer(example['''src'''] , truncation=__lowerCamelCase , max_length=10_24 )
_snake_case = labels.str2int(example['''complexity'''] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
_snake_case = train_test_validation.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=train_test_validation['''train'''].column_names , )
_snake_case = DataCollatorWithPadding(tokenizer=__lowerCamelCase )
_snake_case = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
_snake_case = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , compute_metrics=__lowerCamelCase , )
print('''Training...''' )
trainer.add_callback(CustomCallback(__lowerCamelCase ) )
trainer.train()
if __name__ == "__main__":
main()
| 371 |
"""simple docstring"""
import os
import sys
import unittest
UpperCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
UpperCAmelCase__ = os.path.join(git_repo_path, 'src', 'diffusers')
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 40 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
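# Usage sketch: configuration objects are plain containers, so overrides are
# keyword arguments and everything else keeps its default.
#
#     config = MegatronBertConfig(num_hidden_layers=2)
#     assert config.hidden_size == 1024  # default preserved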
| 37 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # Redirect every node input that pointed at the removed initializer.
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT: 4 bytes per element
                    mem_size *= 4
                elif dtype == 6:  # INT32: 4 bytes per element
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE: 8 bytes per element
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
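# A small end-to-end sanity check for the deduplication above, written as a
# sketch assuming the standard `onnx.helper` API: a graph with two identical
# initializers should shrink to one after optimization.
def _dedup_smoke_test(tmp_dir):
    import numpy as np
    from onnx import TensorProto, helper, numpy_helper

    w_a = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w_a")
    w_b = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w_b")
    x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [2, 2])
    y = helper.make_tensor_value_info("y", TensorProto.FLOAT, [2, 2])
    add_a = helper.make_node("Add", ["x", "w_a"], ["t"])
    add_b = helper.make_node("Add", ["t", "w_b"], ["y"])
    graph = helper.make_graph([add_a, add_b], "g", [x], [y], initializer=[w_a, w_b])
    model_path = os.path.join(tmp_dir, "dup.onnx")
    onnx.save(helper.make_model(graph), model_path)

    optimized_path = remove_dup_initializers(model_path)
    assert len(onnx.load(optimized_path).graph.initializer) == 1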
| 247 | 0 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Dict = None
if self.use_labels:
_UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCAmelCase ( self ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : int = MobileNetVaModel(config=A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : Optional[int] = model(A_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.num_labels
_UpperCAmelCase : Optional[Any] = MobileNetVaForImageClassification(A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : int = model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.num_labels
_UpperCAmelCase : int = MobileNetVaForSemanticSegmentation(A_ )
model.to(A_ )
model.eval()
_UpperCAmelCase : Dict = model(A_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_UpperCAmelCase : List[str] = model(A_ , labels=A_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = config_and_inputs
_UpperCAmelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
_lowercase = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_lowercase = (
{
"feature-extraction": MobileNetVaModel,
"image-classification": MobileNetVaForImageClassification,
"image-segmentation": MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
_lowercase = False
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = MobileNetVaModelTester(self )
_UpperCAmelCase : int = MobileNetVaConfigTester(self , config_class=A_ , has_text_modality=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV2 does not output attentions" )
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Any = model_class(A_ )
_UpperCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
_UpperCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(A_ , A_ , A_ ):
_UpperCAmelCase : Any = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(A_ , A_ ) )
_UpperCAmelCase : str = outputs.hidden_states
_UpperCAmelCase : Union[str, Any] = 16
self.assertEqual(len(A_ ) , A_ )
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : str = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Optional[int] = True
check_hidden_states_output(A_ , A_ , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A_ )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Union[str, Any] = MobileNetVaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
_UpperCAmelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
)
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(A_ )
_UpperCAmelCase : List[str] = self.default_image_processor
_UpperCAmelCase : Dict = prepare_img()
_UpperCAmelCase : Optional[Any] = image_processor(images=A_ , return_tensors="pt" ).to(A_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Any = model(**A_ )
# verify the logits
_UpperCAmelCase : int = torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , A_ )
_UpperCAmelCase : str = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
_UpperCAmelCase : List[str] = model.to(A_ )
_UpperCAmelCase : int = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
_UpperCAmelCase : str = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(images=A_ , return_tensors="pt" ).to(A_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : int = model(**A_ )
_UpperCAmelCase : Tuple = outputs.logits
# verify the logits
_UpperCAmelCase : str = torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , A_ )
_UpperCAmelCase : List[str] = torch.tensor(
[
[[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
[[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
[[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
] , device=A_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) )
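# Quick arithmetic behind the (1, 21, 65, 65) logits shape asserted above,
# under the assumption of DeepLabV3-style sizing: a 513x513 input with an
# effective output stride of 8 gives 513 // 8 + 1 == 65 spatial bins, and the
# segmentation head above predicts 21 classes.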
| 189 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
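# Usage sketch (model name illustrative): fetch the raw image-processor config
# as a plain dict without instantiating any processor class.
#
#     config_dict = get_image_processor_config("google/vit-base-patch16-224")
#     print(config_dict.get("image_processor_type"))  # e.g. "ViTImageProcessor"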
class AutoImageProcessor:
def __init__( self ):
'''simple docstring'''
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(A_ )
def _UpperCAmelCase ( cls , A_ , **A_ ):
'''simple docstring'''
_UpperCAmelCase : str = kwargs.pop("config" , A_ )
_UpperCAmelCase : Dict = kwargs.pop("trust_remote_code" , A_ )
_UpperCAmelCase : str = True
_UpperCAmelCase , _UpperCAmelCase : Tuple = ImageProcessingMixin.get_image_processor_dict(A_ , **A_ )
_UpperCAmelCase : Any = config_dict.get("image_processor_type" , A_ )
_UpperCAmelCase : str = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
_UpperCAmelCase : int = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_UpperCAmelCase : Any = config_dict.pop("feature_extractor_type" , A_ )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
_UpperCAmelCase : Optional[Any] = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
_UpperCAmelCase : Optional[Any] = config_dict["auto_map"]["AutoFeatureExtractor"]
_UpperCAmelCase : List[Any] = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(A_ , A_ ):
_UpperCAmelCase : Any = AutoConfig.from_pretrained(A_ , **A_ )
# It could be in `config.image_processor_type``
_UpperCAmelCase : Optional[Any] = getattr(A_ , "image_processor_type" , A_ )
if hasattr(A_ , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
_UpperCAmelCase : Any = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
_UpperCAmelCase : List[str] = image_processor_class_from_name(A_ )
_UpperCAmelCase : Optional[Any] = image_processor_auto_map is not None
_UpperCAmelCase : Any = image_processor_class is not None or type(A_ ) in IMAGE_PROCESSOR_MAPPING
_UpperCAmelCase : List[Any] = resolve_trust_remote_code(
A_ , A_ , A_ , A_ )
if has_remote_code and trust_remote_code:
_UpperCAmelCase : Optional[int] = get_class_from_dynamic_module(
A_ , A_ , **A_ )
_UpperCAmelCase : Optional[int] = kwargs.pop("code_revision" , A_ )
if os.path.isdir(A_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(A_ , **A_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(A_ , **A_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(A_ ) in IMAGE_PROCESSOR_MAPPING:
_UpperCAmelCase : Optional[int] = IMAGE_PROCESSOR_MAPPING[type(A_ )]
return image_processor_class.from_dict(A_ , **A_ )
raise ValueError(
f'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
f'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
f'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def _UpperCAmelCase ( A_ , A_ ):
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(A_ , A_ )
| 189 | 1 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=[\"About 95 species are currently accepted .\"]\n    >>> predictions=[\"About 95 you now get in .\"]\n    >>> references=[[\"About 95 species are currently known .\"]]\n    >>> wiki_split = datasets.load_metric(\"wiki_split\")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
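# Quick sanity checks for the helpers above: normalization lowercases, strips
# punctuation and articles, and collapses whitespace before comparison.
assert normalize_answer("The  Cat!") == "cat"
assert compute_exact("a cat", "the cat") == 1
assert compute_em(predictions=["the cat"], references=[["A cat!", "dog"]]) == 100.0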
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    # Collect 1- to 4-grams for the source and candidate sentences, and for
    # each reference sentence.
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2grams.append(r1grams[i] + " " + r1grams[i + 1])
            if i < len(r1grams) - 2:
                r3grams.append(r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2])
            if i < len(r1grams) - 3:
                r4grams.append(r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3])
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2grams.append(s1grams[i] + " " + s1grams[i + 1])
        if i < len(s1grams) - 2:
            s3grams.append(s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2])
        if i < len(s1grams) - 3:
            s4grams.append(s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3])

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2grams.append(c1grams[i] + " " + c1grams[i + 1])
        if i < len(c1grams) - 2:
            c3grams.append(c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2])
        if i < len(c1grams) - 3:
            c4grams.append(c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3])

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase=True, tokenizer="13a", return_str=True):
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    """simple docstring"""

    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=[
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
"""https://github.com/cocoxu/simplification/blob/master/SARI.py""",
"""https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
"""https://github.com/mjpost/sacreBLEU""",
] , reference_urls=[
"""https://www.aclweb.org/anthology/Q16-1029.pdf""",
"""https://github.com/mjpost/sacreBLEU""",
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, sources, predictions, references):
        """simple docstring"""
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 25 |
def count_inversions_bf(arr):
    """Count inversions by brute force, checking every pair. O(n^2)."""
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort style divide and conquer. O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion
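# Quick check using the helpers above: a strictly descending list of length n
# has n * (n - 1) / 2 inversions, since every pair is out of order.
assert count_inversions_bf([5, 4, 3, 2, 1]) == 10
assert count_inversions_recursive([5, 4, 3, 2, 1]) == ([1, 2, 3, 4, 5], 10)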
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
| 226 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the logistic sigmoid of the input, applied elementwise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Return the SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
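# Example values: sigmoid(0) = 0.5, so the sigmoid-linear unit at 0 is 0, and
# the function approaches the identity for large positive inputs.
# >>> sigmoid(np.array([-1.0, 0.0, 1.0]))
# array([0.26894142, 0.5       , 0.73105858])
# >>> sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0]))
# array([-0.26894142,  0.        ,  0.73105858])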
| 35 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
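# Round-trip sketch: `to_dict` serializes the nested configs, so the defaults
# chosen in `__init__` are visible in the output.
#
#     config = MaskFormerConfig()
#     d = config.to_dict()
#     assert d["backbone_config"]["model_type"] == "swin"
#     assert d["decoder_config"]["model_type"] == "detr"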
| 35 | 1 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
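# Quick demonstration of the early exit: once a full pass makes no swap, the
# recursion stops and the (now sorted) list is returned.
# >>> bubble_sort([0, 5, 2, 3, 2])
# [0, 2, 2, 3, 5]
# >>> bubble_sort([])
# []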
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 233 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            # Wrap around the alphabet.
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Move to the next key letter, cycling back to the start.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # Non-letters are passed through unchanged.
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
    main()
 | 233 | 1 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
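# Worked example: for N_D = N_A = 1e17 cm^-3 and n_i = 1e10 cm^-3 (roughly
# silicon at room temperature), V_bi = (kT/q) * ln(N_D * N_A / n_i^2)
# = 0.02585 V * ln(1e14) ≈ 0.83 V.
# >>> round(builtin_voltage(1e17, 1e17, 1e10), 2)
# 0.83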
| 16 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        '''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Tuple = SwinvaModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase )
__UpperCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Any = SwinvaForMaskedImageModeling(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : List[Any] = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__UpperCAmelCase : Optional[Any] = 1
__UpperCAmelCase : Dict = SwinvaForMaskedImageModeling(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : str = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : str = self.type_sequence_label_size
__UpperCAmelCase : str = SwinvaForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Any = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = config_and_inputs
__UpperCAmelCase : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
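# Sanity arithmetic for the model shape check above, using the tester defaults
# (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]): the 16x16
# patch grid is downsampled by 2x at each of the two stage transitions, and
# the channel width doubles alongside it.
#
#     expected_seq_len = ((32 // 2) ** 2) // (4 ** (3 - 1))  # 256 // 16 = 16
#     expected_dim = 16 * 2 ** (3 - 1)                       # 64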
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
    def test_config(self):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
    def test_multi_gpu_data_parallel_forward(self):
        '''simple docstring'''
        pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        '''simple docstring'''
        pass
    def test_model_common_attributes(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        '''simple docstring'''
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""").to(
            torch_device)
        image_processor = self.default_image_processor
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
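# --- Hedged usage sketch (added; mirrors the integration test above) ---
# Outside the test harness the same checkpoint can be queried directly. This
# assumes the imports already used in this file (SwinvaForImageClassification,
# AutoImageProcessor, Image, torch) and the local COCO fixture image.
def _demo_swinva_inference() -> None:
    model = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    print(model.config.id2label[int(logits.argmax(-1))])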
| 16 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    def __init__(self, parent, batch_size=1_3, image_size=3_2, patch_size=2, num_channels=3, embed_dim=1_6, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1E-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=1_0, encoder_stride=8, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
return config, pixel_values, labels
    def get_config(self):
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=3_7)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
            out_len = len(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            if hasattr(self.model_tester, '''num_hidden_states_types'''):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], )
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, '''expected_num_hidden_layers''', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=F'''Parameter {name} of model {model_class} seems not properly initialized''', )
@require_vision
@require_torch
class SwinvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''')
            if is_vision_available()
            else None
        )
@slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''').to(
            torch_device)
        image_processor = self.default_image_processor
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 60 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    '''simple docstring'''
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
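# Hedged doctest-style check (added): 1406357289 is the example pandigital
# given in the Project Euler 43 problem statement, so the predicate should
# accept it.
# >>> is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))
# True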
def solution(n: int = 10) -> int:
    '''simple docstring'''
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
print(f'''{solution() = }''')
| 40 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : int , *_a : Optional[Any] , **_a : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *_a : int , **_a : Any ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : List[str] , *_a : Optional[int] , **_a : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCamelCase__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : Dict , *_a : int , **_a : str ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : int , *_a : Any , **_a : Any ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : int , *_a : Tuple , **_a : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCamelCase__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : List[str] , *_a : Optional[Any] , **_a : Any ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : str , *_a : int , **_a : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : Dict , *_a : Any , **_a : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCamelCase__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : int , *_a : int , **_a : Union[str, Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : Any , *_a : Any , **_a : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : int , *_a : Optional[int] , **_a : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCamelCase__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : Optional[Any] , *_a : str , **_a : Union[str, Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : Any , *_a : str , **_a : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : Optional[int] , *_a : Tuple , **_a : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class lowerCamelCase__ ( metaclass=DummyObject ):
    _backends = ["torch", "transformers", "onnx"]
def __init__( self : Dict , *_a : int , **_a : Dict ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : str , *_a : Optional[int] , **_a : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def _lowerCamelCase ( cls : List[Any] , *_a : Optional[int] , **_a : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
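# --- Hedged, self-contained analogue (added; toy names, not the real API) ---
# The DummyObject/requires_backends pattern above surfaces a missing optional
# dependency as a clear ImportError at *use* time rather than at import time.
# A minimal sketch of the same idea:
class _MissingBackendMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the onnx backend; install onnx to use it.")

class _OnnxPlaceholder(metaclass=_MissingBackendMeta):
    pass
# _OnnxPlaceholder.from_pretrained  # would raise the ImportError above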
| 350 |
import os
def solution(filename: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
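# Hedged illustration (added; toy 3x3 matrix, not the Euler input): the same
# left-to-right DP, with a downward then an upward relaxation pass per column
# to account for free vertical movement.
def _minimal_path_sum(matrix: list) -> int:
    rows, cols = len(matrix), len(matrix[0])
    sums = [[row[0]] + [0] * (cols - 1) for row in matrix]
    for j in range(1, cols):
        for i in range(rows):
            sums[i][j] = sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            sums[i][j] = min(sums[i][j], sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            sums[i][j] = min(sums[i][j], sums[i + 1][j] + matrix[i][j])
    return min(row[-1] for row in sums)

assert _minimal_path_sum([[1, 9, 1], [1, 1, 1], [9, 9, 1]]) == 3  # middle row straight across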
if __name__ == "__main__":
print(f"""{solution() = }""")
| 42 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        '''simple docstring'''
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        '''simple docstring'''
        return super().__call__(images, **kwargs)
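    # Hedged usage sketch (added; the checkpoint name is an assumption — any
    # image-to-text model on the Hub works through this __call__):
    #   from transformers import pipeline
    #   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
    #   captioner("./tests/fixtures/tests_samples/COCO/000000039769.png")
    #   # -> [{'generated_text': '...'}]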
    def preprocess(self, image, prompt=None):
        '''simple docstring'''
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    F'Received an invalid text input, got - {type(prompt)} - but expected a single string. '
                    "Note also that one single text can be provided for conditional image to text generation.")
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(F'Model type {model_type} does not support conditional text generation')
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        '''simple docstring'''
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        '''simple docstring'''
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records
| 189 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        '''simple docstring'''
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        '''simple docstring'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized(self):
        '''simple docstring'''
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
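# --- Hedged example (added; toy modules, not the real conversion) ---
# Tracker registers a forward hook on every submodule and keeps the leaf
# modules that actually ran; `parametrized` then filters down to modules that
# own weights. Call _demo_tracker() to see it on a tiny nn.Sequential.
def _demo_tracker() -> None:
    toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.BatchNorm2d(8))
    traced = Tracker(toy)(torch.randn(1, 3, 16, 16)).parametrized
    print([type(m).__name__ for m in traced])  # ['Conv2d', 'BatchNorm2d'] (ReLU has no weights)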
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor):
        '''simple docstring'''
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                F'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                F' destination module has {len(dest_traced)}.')
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(F'Transfered from={src_m} to={dest_m}')
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f'Converting {name}...')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f'resnet{"-".join(name.split("resnet"))}'
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )
        print(f'Pushed {checkpoint_name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
lowerCamelCase : int =parser.parse_args()
lowerCamelCase : Path =args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 189 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
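# Hedged, commented-out analogue (added; `_SUBMODULE_BY_SYMBOL` and this
# module-level __getattr__ are a sketch, not the real _LazyModule): PEP 562
# gives the same deferred-import behavior without replacing the module object.
# _SUBMODULE_BY_SYMBOL = {name: mod for mod, names in _import_structure.items() for name in names}
# def __getattr__(name):
#     import importlib
#     if name in _SUBMODULE_BY_SYMBOL:
#         return getattr(importlib.import_module("." + _SUBMODULE_BY_SYMBOL[name], __name__), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")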
| 180 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524_288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs=None, **kwargs, ) -> None:
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return self.sp_model.get_piece_size()
    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        '''simple docstring'''
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
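# Hedged usage sketch (added; needs network access to the Hugging Face Hub):
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   pieces = tokenizer.tokenize("Crime and Punishment")  # SentencePiece subword pieces
#   tokenizer.convert_tokens_to_string(pieces)           # round-trips back to text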
| 180 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
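# Hedged cross-check (added): inclusion-exclusion over triangular numbers
# gives the same answer in O(1); 233168 is the well-known result for n=1000.
def solution_closed_form(n: int = 1_000) -> int:
    def tri(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2
    return tri(3) + tri(5) - tri(15)

assert solution_closed_form() == 233_168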
if __name__ == "__main__":
print(F"{solution() = }")
| 35 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 1_2, 1_0, 1_1])
    def test_chinese(self):
        """simple docstring"""
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens(self):
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        """simple docstring"""
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
@require_torch
    def test_prepare_batch(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
"""simple docstring"""
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control(self):
"""simple docstring"""
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation(self):
"""simple docstring"""
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == text + [1_0_2]
        assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 111 |
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float, ) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
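# A minimal usage sketch (assumes the function is imported as defined above):
# by the mass-action law n * p = n_i**2, any two known concentrations fix the
# third; the returned tuple names the solved-for quantity.
# >>> carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
# ('intrinsic_conc', 50.0)
# >>> carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
# ('electron_conc', 25.0)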
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111 | 1 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
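# Sanity check with hypothetical textbook values (not from the original file):
# for a silicon p-n junction with N_D = N_A = 1e17 and n_i = 1.5e10 (cm^-3),
# kT/q * ln(N_D * N_A / n_i**2) at T = 300 K comes out to roughly 0.81 V.
# >>> round(builtin_voltage(1e17, 1e17, 1.5e10), 2)
# 0.81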
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # the two state dicts line up positionally, so copy weights key-by-key
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        from_model_logits = from_model(x)
        our_model_logits = our_model(x).logits
    assert torch.allclose(from_model_logits, our_model_logits), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
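# Hypothetical invocation (script and folder names are illustrative):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub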
| 16 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    """Builds tiny configs and inputs for the Mask2Former tests below."""
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim))
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4))
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
            comm_check_on_output(result)
            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for Mask2Former."""
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)
    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass
    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass
    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    """Load the standard COCO cats fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released checkpoint."""
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"
    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
| 362 |
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 315 | 0 |
import re
def dna_complement(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
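# Usage (complement only, same reading direction; reverse the result if you
# need the reverse complement):
# >>> dna_complement("ATCG")
# 'TAGC'
# >>> dna_complement("GTAT")
# 'CATA'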
if __name__ == "__main__":
import doctest
doctest.testmod()
| 310 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """Reproduce the original Mesh-TensorFlow score for mt5-small."""
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
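    # Note on the check above: `loss` is the mean per-token cross-entropy, so
    # multiplying by the label length recovers the summed negative log-likelihood;
    # negating it gives a score directly comparable to the Mesh-TensorFlow output.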
| 42 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    """Static checks over every dataset script under ./datasets."""
    def _no_encoding_on_file_open(self, file_path: str):
        r"""Find instances of `open(...)` that are missing an `encoding=` argument."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match
    def _no_print_statements(self, file_path: str):
        r"""Find uncommented `print(...)` statements (comments and docstrings are ignored)."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")
    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
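    # How the first regex works (illustrative): the negative lookahead fails the
    # match when `encoding` or a binary/write mode token appears later on the
    # line, and the lookbehind requires whitespace before `open(`. So
    # `f = open(path)` is flagged, while `open(path, encoding="utf-8")` and
    # `open(path, "rb")` are not.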
| 166 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """Wraps an OwlViT image processor and a CLIP tokenizer into one processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)
    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)
    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
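# Usage sketch (checkpoint id is illustrative): nested text lists supply
# several detection queries per image; shorter inner lists are padded with " ".
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"], ["a remote"]],
#                    images=image, return_tensors="pt")
# sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']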
| 166 | 1 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
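# Hypothetical invocation (paths are illustrative):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path albert_base/model.ckpt-best \
#       --albert_config_file albert_base/albert_config.json \
#       --pytorch_dump_path albert_base/pytorch_model.bin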
| 180 | from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes strictly below `max_number`."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid integers p**q * q**p (p < q prime) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
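# Why log2 suffices here (Project Euler 800): the condition
# p**q * q**p <= base**degree is equivalent, after taking log2 of both sides,
# to q*log2(p) + p*log2(q) <= degree*log2(base), so the two-pointer sweep
# above never has to materialise the astronomically large integers.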
if __name__ == "__main__":
print(F'''{solution() = }''')
| 180 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style)."""
    def __init__(self, *, bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices
    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word
    def __len__(self):
        return len(self.symbols)
    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """Load a dictionary from a text file with `<symbol> <count>` per line."""
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary, accumulating its count."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word))
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
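# Quick check on a toy vocab (hypothetical values; note the special tokens are
# re-inserted at the end of the dict):
# >>> rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
# {'le': 5, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}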
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")
    args = chkpt["cfg"]["model"]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }
    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model_state_dict = chkpt["model"]
    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCamelCase__ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 299 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# NOTE: the original class names were not recoverable from this file; the names
# below follow diffusers' standard torch+transformers+onnx dummy objects.
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
| 299 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor and a BERT tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
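# Usage sketch (checkpoint id is illustrative):
# from PIL import Image
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=Image.open("photo.jpg"), text="a photography of", return_tensors="pt")
# inputs.keys()  # pixel_values, input_ids, attention_mask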
| 111 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 111 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 215 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}
class XLMRobertaConfig(PretrainedConfig):
    """Configuration for XLM-RoBERTa models."""
    model_type = "xlm-roberta"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        ) | 215 | 1 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
a : Tuple = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    def _info( self : str ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
| 56 |
"""simple docstring"""
def solution( n : int = 10_00 ) -> int:
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
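# Hand-checked example of the closed form above: solution(10) == 300
# (a = 3 contributes 2 * 3 * 1 = 6, ..., a = 10 contributes 2 * 10 * 4 = 80).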
if __name__ == "__main__":
print(solution())
| 315 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__UpperCamelCase : Optional[int] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__UpperCamelCase : Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"{len(upper_files)} files contain uppercase characters:")
print("\n".join(upper_files) + "\n")
__UpperCamelCase : Dict = [file for file in filepaths if " " in file]
if space_files:
print(f"{len(space_files)} files contain space characters:")
print("\n".join(space_files) + "\n")
__UpperCamelCase : Tuple = [file for file in filepaths if "-" in file]
if hyphen_files:
print(f"{len(hyphen_files)} files contain hyphen characters:")
print("\n".join(hyphen_files) + "\n")
__UpperCamelCase : str = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"{len(nodir_files)} files are not in a directory:")
print("\n".join(nodir_files) + "\n")
__UpperCamelCase : List[str] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
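# Note: a non-zero argument to sys.exit() marks the run as failed, so the bad-file
# count doubles as the process exit status (operating systems truncate it to 0-255).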
| 368 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __magic_name__ ( __lowerCAmelCase):
def __init__( self : Dict , lowerCamelCase__ : NestedDataStructureLike[PathLike] , lowerCamelCase__ : Optional[NamedSplit] = None , lowerCamelCase__ : Optional[Features] = None , lowerCamelCase__ : str = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional[int] = None , **lowerCamelCase__ : Tuple , ) -> Any:
'''simple docstring'''
super().__init__(
lowerCamelCase__ , split=lowerCamelCase__ , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ , streaming=lowerCamelCase__ , num_proc=lowerCamelCase__ , **lowerCamelCase__ , )
UpperCamelCase__ : Optional[Any] = path_or_paths if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else {self.split: path_or_paths}
UpperCamelCase__ : Optional[Any] = Text(
cache_dir=lowerCamelCase__ , data_files=lowerCamelCase__ , features=lowerCamelCase__ , **lowerCamelCase__ , )
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
if self.streaming:
UpperCamelCase__ : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCamelCase__ : Union[str, Any] = None
UpperCamelCase__ : List[str] = None
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCamelCase__ , download_mode=lowerCamelCase__ , verification_mode=lowerCamelCase__ , base_path=lowerCamelCase__ , num_proc=self.num_proc , )
UpperCamelCase__ : Tuple = self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase__ , in_memory=self.keep_in_memory )
return dataset
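# Usage sketch -- in the `datasets` library the obfuscated class above is exposed
# as TextDatasetReader; the path below is illustrative:
#   dataset = TextDatasetReader("corpus.txt", split="train").read()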
| 51 | 0 |
'''simple docstring'''
def solution( n = 100 ):
    """simple docstring"""
    collect_powers = set()
    current_pow = 0
    max_limit = n + 1  # maximum limit
    for a in range(2 , max_limit ):
        for b in range(2 , max_limit ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
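# Hand-checked example: solution(5) == 15, since the 16 products a**b for
# 2 <= a, b <= 5 contain exactly one collision (2**4 == 4**2 == 16).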
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 166 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """OwlViTImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : List[str] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : int=None , **_lowerCAmelCase : Any):
'''simple docstring'''
__lowercase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowerCAmelCase , )
__lowercase =kwargs.pop('feature_extractor')
__lowercase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(_lowerCAmelCase , _lowerCAmelCase)
def __call__( self : Dict , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[Any]="max_length" , _lowerCAmelCase : Optional[Any]="np" , **_lowerCAmelCase : Any):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.')
if text is not None:
if isinstance(_lowerCAmelCase , _lowerCAmelCase) or (isinstance(_lowerCAmelCase , _lowerCAmelCase) and not isinstance(text[0] , _lowerCAmelCase)):
__lowercase =[self.tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase)]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase) and isinstance(text[0] , _lowerCAmelCase):
__lowercase =[]
# Maximum number of queries across batch
__lowercase =max([len(_lowerCAmelCase) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(_lowerCAmelCase) != max_num_queries:
__lowercase =t + [' '] * (max_num_queries - len(_lowerCAmelCase))
__lowercase =self.tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase)
encodings.append(_lowerCAmelCase)
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings')
if return_tensors == "np":
__lowercase =np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0)
__lowercase =np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__lowercase =jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0)
__lowercase =jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
__lowercase =torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0)
__lowercase =torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__lowercase =tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0)
__lowercase =tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0)
else:
raise ValueError('Target return tensor type could not be returned')
__lowercase =BatchEncoding()
__lowercase =input_ids
__lowercase =attention_mask
if query_images is not None:
__lowercase =BatchEncoding()
__lowercase =self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase).pixel_values
__lowercase =query_pixel_values
if images is not None:
__lowercase =self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase)
if text is not None and images is not None:
__lowercase =image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__lowercase =image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase) , tensor_type=_lowerCAmelCase)
def __lowerCamelCase ( self : int , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Any):
'''simple docstring'''
return self.image_processor.post_process(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : List[Any] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : List[Any]):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : Union[str, Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Any):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : str , *_lowerCAmelCase : int , **_lowerCAmelCase : Dict):
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase)
@property
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCAmelCase , )
return self.image_processor_class
@property
def __lowerCamelCase ( self : Any):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCAmelCase , )
return self.image_processor
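# Usage sketch (the checkpoint name is one of the published OWL-ViT checkpoints
# and `image` is assumed to be a PIL image):
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")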
| 166 | 1 |
'''simple docstring'''
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( matrix ):
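    """
    Computes the minimum path cost from the top-left to the bottom-right cell,
    moving only right or down (doctest value hand-computed):

    >>> __SCREAMING_SNAKE_CASE([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """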
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
    for i in range(1 , len(matrix ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
    for i in range(1 , len(matrix ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod() | 367 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
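    # Replacing the module in sys.modules with a _LazyModule defers the heavy
    # framework imports until an attribute is first accessed; the TYPE_CHECKING
    # branch above exists only so static type checkers still see the real symbols.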
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure) | 270 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase = logging.getLogger(__name__)
class SummarizationModule ( BaseTransformer ):
"""simple docstring"""
UpperCAmelCase_ ="summarization"
UpperCAmelCase_ =["loss"]
UpperCAmelCase_ =ROUGE_KEYS
UpperCAmelCase_ ="rouge2"
def __init__( self , _A , **_A ) -> Tuple:
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(_A , num_labels=_A , mode=self.mode , **_A )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''metrics.json'''
SCREAMING_SNAKE_CASE_ = Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = defaultdict(_A )
SCREAMING_SNAKE_CASE_ = self.config.model_type
SCREAMING_SNAKE_CASE_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
SCREAMING_SNAKE_CASE_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
SCREAMING_SNAKE_CASE_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE_ = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE_ = get_git_info()['''repo_sha''']
SCREAMING_SNAKE_CASE_ = hparams.num_workers
SCREAMING_SNAKE_CASE_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _A ):
SCREAMING_SNAKE_CASE_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE_ = self.decoder_start_token_id
SCREAMING_SNAKE_CASE_ = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE_ = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE_ = self.model.config.max_length
SCREAMING_SNAKE_CASE_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _UpperCamelCase ( self , _A ) -> Dict[str, List[str]]:
SCREAMING_SNAKE_CASE_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_A , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
SCREAMING_SNAKE_CASE_ = True
return readable_batch
def _UpperCamelCase ( self , _A , **_A ) -> List[str]:
return self.model(_A , **_A )
def _UpperCamelCase ( self , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
return lmap(str.strip , _A )
def _UpperCamelCase ( self , _A ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = batch['''input_ids'''], batch['''attention_mask''']
SCREAMING_SNAKE_CASE_ = batch['''labels''']
if isinstance(self.model , _A ):
SCREAMING_SNAKE_CASE_ = self.model._shift_right(_A )
else:
SCREAMING_SNAKE_CASE_ = shift_tokens_right(_A , _A )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE_ = decoder_input_ids
self.save_readable_batch(_A )
SCREAMING_SNAKE_CASE_ = self(_A , attention_mask=_A , decoder_input_ids=_A , use_cache=_A )
SCREAMING_SNAKE_CASE_ = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE_ = nn.CrossEntropyLoss(ignore_index=_A )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ = nn.functional.log_softmax(_A , dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = label_smoothed_nll_loss(
_A , _A , self.hparams.label_smoothing , ignore_index=_A )
return (loss,)
@property
def _UpperCamelCase ( self ) -> int:
return self.tokenizer.pad_token_id
def _UpperCamelCase ( self , _A , _A ) -> Dict:
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
# tokens per batch
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE_ = batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _UpperCamelCase ( self , _A , _A ) -> Dict:
return self._generative_step(_A )
def _UpperCamelCase ( self , _A , _A="val" ) -> Dict:
self.step_count += 1
SCREAMING_SNAKE_CASE_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE_ = losses['''loss''']
SCREAMING_SNAKE_CASE_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
SCREAMING_SNAKE_CASE_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE_ = torch.tensor(_A ).type_as(_A )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_A )
SCREAMING_SNAKE_CASE_ = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE_ = self.step_count
self.metrics[prefix].append(_A ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE_ = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def _UpperCamelCase ( self , _A , _A ) -> Dict:
return calculate_rouge(_A , _A )
def _UpperCamelCase ( self , _A ) -> dict:
SCREAMING_SNAKE_CASE_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE_ = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_A , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE_ = (time.time() - ta) / batch['''input_ids'''].shape[0]
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(_A )
SCREAMING_SNAKE_CASE_ = self.ids_to_clean_text(batch['''labels'''] )
SCREAMING_SNAKE_CASE_ = self._step(_A )
SCREAMING_SNAKE_CASE_ = dict(zip(self.loss_names , _A ) )
SCREAMING_SNAKE_CASE_ = self.calc_generative_metrics(_A , _A )
SCREAMING_SNAKE_CASE_ = np.mean(lmap(_A , _A ) )
base_metrics.update(gen_time=_A , gen_len=_A , preds=_A , target=_A , **_A )
return base_metrics
def _UpperCamelCase ( self , _A , _A ) -> Any:
return self._generative_step(_A )
def _UpperCamelCase ( self , _A ) -> Optional[int]:
return self.validation_epoch_end(_A , prefix='''test''' )
def _UpperCamelCase ( self , _A ) -> SeqaSeqDataset:
SCREAMING_SNAKE_CASE_ = self.n_obs[type_path]
SCREAMING_SNAKE_CASE_ = self.target_lens[type_path]
SCREAMING_SNAKE_CASE_ = self.dataset_class(
self.tokenizer , type_path=_A , n_obs=_A , max_target_length=_A , **self.dataset_kwargs , )
return dataset
def _UpperCamelCase ( self , _A , _A , _A = False ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataset(_A )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_sortish_sampler(_A , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_A , batch_sampler=_A , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_A , batch_size=_A , collate_fn=dataset.collate_fn , shuffle=_A , num_workers=self.num_workers , sampler=_A , )
def _UpperCamelCase ( self ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_A )
return dataloader
def _UpperCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def _UpperCamelCase ( self ) -> DataLoader:
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
    def add_model_specific_args( parser , root_dir ) -> Dict:
        BaseTransformer.add_model_specific_args(parser , root_dir )
        add_generic_args(parser , root_dir )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_A )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_A )
parser.add_argument('''--max_tokens_per_batch''' , type=_A , default=_A )
parser.add_argument('''--logger_name''' , type=_A , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=_A , default=500 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=_A , default=-1 , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument(
'''--task''' , type=_A , default='''summarization''' , required=_A , help='''# examples. -1 means use all.''' )
parser.add_argument('''--label_smoothing''' , type=_A , default=0.0 , required=_A )
parser.add_argument('''--src_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--tgt_lang''' , type=_A , default='''''' , required=_A )
parser.add_argument('''--eval_beams''' , type=_A , default=_A , required=_A )
parser.add_argument(
'''--val_metric''' , type=_A , default=_A , required=_A , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=_A , default=_A , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=_A , default=1 , required=_A , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=_A , default=-1 , required=_A , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class TranslationModule ( SummarizationModule ):
"""simple docstring"""
UpperCAmelCase_ ="translation"
UpperCAmelCase_ =["loss"]
UpperCAmelCase_ =["bleu"]
UpperCAmelCase_ ="bleu"
def __init__( self , _A , **_A ) -> Optional[int]:
super().__init__(_A , **_A )
SCREAMING_SNAKE_CASE_ = hparams.src_lang
SCREAMING_SNAKE_CASE_ = hparams.tgt_lang
def _UpperCamelCase ( self , _A , _A ) -> dict:
return calculate_bleu(_A , _A )
def main( args, model=None ):
Path(args.output_dir ).mkdir(exist_ok=__lowerCamelCase )
check_output_dir(__lowerCamelCase, expected_items=3 )
if model is None:
if "summarization" in args.task:
            model = SummarizationModule(args )
else:
            model = TranslationModule(args )
    dataset = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('''/tmp''' )
or str(args.output_dir ).startswith('''/var''' )
):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger
        project = os.environ.get('''WANDB_PROJECT''', dataset )
        logger = WandbLogger(name=model.output_dir.name, project=project )
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger
        logger = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' )
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
    else:
        es_callback = False
    lower_is_better = args.val_metric == '''loss'''
    trainer = generic_train(
        model, args, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better ), early_stopping_callback=es_callback, logger=logger, )
pickle_save(model.hparams, model.output_dir / '''hparams.pkl''' )
if not args.do_predict:
return model
    model.hparams.test_checkpoint = ''''''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt''' ), recursive=True ) )
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
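# Illustrative launch (flags abbreviated; the data and output paths are placeholders):
#   python finetune.py --data_dir=DATA --output_dir=OUT \
#       --model_name_or_path=t5-small --gpus=1 --do_train --do_predict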
| 299 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__UpperCAmelCase = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results( test_results ):
    expressions = test_results.split(''' ''' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
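# Hand-traced example: handle_test_results("== 1 failed, 2 passed in 3.45s ==")
# returns (1, 2, "3.45s") -- the trailing "==" token routes time_spent to the
# second-to-last whitespace-separated field.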
def extract_first_line_failure( failures_short_lines ):
    failures = {}
    failure = None
    in_error = False
    for line in failures_short_lines.split('''\n''' ):
        if re.search(r'''_ \[doctest\]''', line ):
            in_error = True
            failure = line.split(''' ''' )[2]
        elif in_error and not line.split(''' ''' )[0].isdigit():
            failures[failure] = line
            in_error = False
    return failures
class Message:
    """simple docstring"""
    def __init__( self , title , doc_test_results ) -> Dict:
        self.title = title
        self._time_spent = doc_test_results['''time_spent'''].split(''',''' )[0]
        self.n_success = doc_test_results['''success''']
        self.n_failures = doc_test_results['''failures''']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time( self ) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(''':''' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F'''{int(hours )}h{int(minutes )}m{int(seconds )}s'''
@property
    def header( self ) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def failures( self ) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
@property
    def category_failures( self ) -> Dict:
        line_length = 40
        category_failures = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = ''''''
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
    def payload( self ) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
def _UpperCamelCase ( ) -> Any:
SCREAMING_SNAKE_CASE_ = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_A )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , )
    def post( self ) -> Optional[int]:
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
        text = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=text , )
    def get_reply_blocks( self , job_name , job_link , failures , text ) -> Optional[int]:
        failures_text = ''''''
        for key, value in failures.items():
            value = value[:200] + ''' [Truncated]''' if len(value ) > 250 else value
            failures_text += F'''*{key}*\n_{value}_\n\n'''
        title = job_name
        content = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
        if job_link is not None:
            content['''accessory'''] = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self ) -> int:
        if self.thread_ts is None:
            raise ValueError('''Can only post reply if a post has been made.''' )
        job_link = self.doc_test_results.pop('''job_link''' )
        self.doc_test_results.pop('''failures''' )
        self.doc_test_results.pop('''success''' )
        self.doc_test_results.pop('''time_spent''' )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                text = F'''*Num failures* :{len(job_result["failed"] )} \n'''
                failures = job_result['''failures''']
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print('''Sending the following reply''' )
                print(json.dumps({'''blocks''': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=F'''Results for {job}''' , blocks=blocks , thread_ts=self.thread_ts['''ts'''] , )
                time.sleep(1 )
def get_job_links():
    run_id = os.environ['''GITHUB_RUN_ID''']
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F'''&page={i + 2}''' ).json()
            jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        return jobs
    except Exception as e:
        print('''Unknown error, could not fetch links.''', e )
        return {}
def retrieve_artifact( artifact_path ):
    _artifact = {}
    if os.path.exists(artifact_path ):
        files = os.listdir(artifact_path )
        for file in files:
            try:
                with open(os.path.join(artifact_path, file ), encoding='''utf-8''' ) as f:
                    # key by file stem so e.g. stats.txt is exposed as artifact["stats"]
                    _artifact[file.split('''.''' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F'''Could not open {os.path.join(artifact_path, file )}.''' ) from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        """simple docstring"""
        def __init__( self , name ) -> List[Any]:
            self.name = name
            self.paths = []
        def __str__( self ) -> int:
            return self.name
        def add_path( self , path ) -> Tuple:
            self.paths.append({'''name''': self.name, '''path''': path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(artifact_name )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "
        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 299 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 371 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowerCAmelCase : List[str] = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def test_custom_files_are_present(transformers_path ):
"""simple docstring"""
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--check_lib""", action="""store_true""", help="""Whether to check the build or the actual package.""")
lowerCAmelCase : Dict = parser.parse_args()
if args.check_lib:
lowerCAmelCase : Union[str, Any] = importlib.import_module("""transformers""")
lowerCAmelCase : int = Path(transformers_module.__file__).parent
else:
lowerCAmelCase : int = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError("""The built release does not contain the custom files. Fix this before going further!""")
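# With --check_lib the presence check runs against the installed `transformers`
# package; without it, against the local build/lib/transformers output of a
# `python setup.py build` (assumed release-workflow usage).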
| 25 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 215 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType ( enum.Enum ):
    """simple docstring"""
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline ( Pipeline ):
"""simple docstring"""
    XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self ,*a_ ,**a_ ) -> Union[str, Any]:
super().__init__(*a_ ,**a_ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_UpperCAmelCase : List[str] = None
if self.model.config.prefix is not None:
_UpperCAmelCase : Any = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_UpperCAmelCase : Union[str, Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : List[Any] = self._sanitize_parameters(prefix=a_ ,**self._forward_params )
_UpperCAmelCase : Optional[int] = {**self._preprocess_params, **preprocess_params}
_UpperCAmelCase : List[Any] = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self ,return_full_text=None ,return_tensors=None ,return_text=None ,return_type=None ,clean_up_tokenization_spaces=None ,prefix=None ,handle_long_generation=None ,stop_sequence=None ,**generate_kwargs ,) -> Dict:
_UpperCAmelCase : int = {}
if prefix is not None:
_UpperCAmelCase : Union[str, Any] = prefix
if prefix:
_UpperCAmelCase : Union[str, Any] = self.tokenizer(
a_ ,padding=a_ ,add_special_tokens=a_ ,return_tensors=self.framework )
_UpperCAmelCase : Optional[int] = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
""" [None, 'hole']""" )
_UpperCAmelCase : Optional[Any] = handle_long_generation
preprocess_params.update(a_ )
_UpperCAmelCase : str = generate_kwargs
_UpperCAmelCase : str = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
_UpperCAmelCase : Any = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
_UpperCAmelCase : Tuple = ReturnType.TENSORS
if return_type is not None:
_UpperCAmelCase : int = return_type
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCAmelCase : str = self.tokenizer.encode(a_ ,add_special_tokens=a_ )
if len(a_ ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
_UpperCAmelCase : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self ,*a_ ,**a_ ) -> Dict:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True} )
return super()._parse_and_tokenize(*a_ ,**a_ )
def __call__( self ,a_ ,**a_ ) -> str:
return super().__call__(a_ ,**a_ )
    def preprocess( self ,prompt_text ,prefix="" ,handle_long_generation=None ,**generate_kwargs ) -> Optional[Any]:
_UpperCAmelCase : str = self.tokenizer(
prefix + prompt_text ,padding=a_ ,add_special_tokens=a_ ,return_tensors=self.framework )
_UpperCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
_UpperCAmelCase : Dict = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
_UpperCAmelCase : str = generate_kwargs["""max_new_tokens"""]
else:
_UpperCAmelCase : Optional[int] = generate_kwargs.get("""max_length""" ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
_UpperCAmelCase : str = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""" )
_UpperCAmelCase : Optional[Any] = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
_UpperCAmelCase : Optional[int] = inputs["""attention_mask"""][:, -keep_length:]
return inputs
    def _forward( self ,model_inputs ,**generate_kwargs ) -> Union[str, Any]:
_UpperCAmelCase : Optional[Any] = model_inputs["""input_ids"""]
_UpperCAmelCase : List[str] = model_inputs.get("""attention_mask""" ,a_ )
# Allow empty prompts
if input_ids.shape[1] == 0:
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : int = 1
else:
_UpperCAmelCase : List[str] = input_ids.shape[0]
_UpperCAmelCase : Any = model_inputs.pop("""prompt_text""" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_UpperCAmelCase : List[Any] = generate_kwargs.pop("""prefix_length""" ,0 )
if prefix_length > 0:
_UpperCAmelCase : Optional[int] = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
_UpperCAmelCase : Optional[int] = generate_kwargs.get("""max_length""" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_UpperCAmelCase : str = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_UpperCAmelCase : Optional[int] = self.model.generate(input_ids=a_ ,attention_mask=a_ ,**a_ )
_UpperCAmelCase : Dict = generated_sequence.shape[0]
if self.framework == "pt":
_UpperCAmelCase : Optional[int] = generated_sequence.reshape(a_ ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
_UpperCAmelCase : Union[str, Any] = tf.reshape(a_ ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess ( self ,model_outputs ,return_type=ReturnType.FULL_TEXT ,clean_up_tokenization_spaces=True ) -> List[str]:
        generated_sequence = model_outputs["""generated_sequence"""][0]
        input_ids = model_outputs["""input_ids"""]
        prompt_text = model_outputs["""prompt_text"""]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"""generated_token_ids""": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence ,skip_special_tokens=True ,clean_up_tokenization_spaces=clean_up_tokenization_spaces ,)
                # Remove the padding prompt from the sequence if an XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0] ,skip_special_tokens=True ,clean_up_tokenization_spaces=clean_up_tokenization_spaces ,) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"""generated_text""": all_text}
            records.append(record )
        return records
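    # A minimal usage sketch (assumes the public `pipeline` factory; the model name is
    # illustrative only):
    #
    #   from transformers import pipeline
    #   generator = pipeline("text-generation", model="sshleifer/tiny-gpt2")
    #   generator("Hello,", max_new_tokens=5, return_full_text=False)
    #   # -> [{"generated_text": "..."}]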
| 215 | 1 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = MODEL_FOR_MASKED_LM_MAPPING
__magic_name__ :List[Any] = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
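            # Releasing cached allocator blocks here keeps one test's GPU footprint from
            # skewing the memory available to the next test.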
@require_tf
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
lowerCAmelCase__ :Dict = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 3_8_0_1_5, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 2_5_5_0_6, 'token_str': ' accuser'},
] , )
lowerCAmelCase__ :List[Any] = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1E-05,
'token': 3_8_0_1_5,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1E-05,
'token': 2_5_5_0_6,
'token_str': ' accuser',
},
] , )
lowerCAmelCase__ :Optional[Any] = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2E-05, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 2_9_4_1, 'token_str': ' Te'},
] , )
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
lowerCAmelCase__ :str = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 3_5_6_7_6, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 1_6_4_1_6, 'token_str': 'ELS'},
] , )
lowerCAmelCase__ :List[str] = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2E-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 1_6_4_1_6, 'token_str': 'ELS'},
] , )
lowerCAmelCase__ :Any = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2E-05, 'token': 2_9_4_1, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
] , )
lowerCAmelCase__ :Dict = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6 ) , [
[
{
'score': 2.2E-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2E-05, 'token': 1_6_4_1_6, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2E-05,
'token': 3_5_6_7_6,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2E-05, 'token': 1_6_4_1_6, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
lowerCAmelCase__ :List[Any] = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
@slow
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(__UpperCAmelCase )
@slow
@require_tf
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(__UpperCAmelCase )
    def run_large_test ( self , unmasker ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'sequence': 'My name is John', 'score': 0.0_08, 'token': 6_1_0, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_07, 'token': 1_5_7_3, 'token_str': ' Chris'},
] , )
lowerCAmelCase__ :str = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_51,
'token': 2_2_0_1,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_14,
'token': 1_2_7_9_0,
'token_str': ' Lyon',
},
] , )
lowerCAmelCase__ :Union[str, Any] = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_05, 'token': 3_4_9_9, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_00, 'token': 1_3_6_0_6, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_00, 'token': 2_9_4_1, 'token_str': ' Te'},
] , )
@require_torch
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :int = None
self.run_pipeline_test(__UpperCAmelCase , [] )
@require_tf
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
lowerCAmelCase__ :Union[str, Any] = None
lowerCAmelCase__ :Optional[Any] = None
self.run_pipeline_test(__UpperCAmelCase , [] )
    def get_test_pipeline ( self , model , tokenizer , processor ):
        '''simple docstring'''
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
        lowerCAmelCase__ :Optional[Any] = FillMaskPipeline(model=model , tokenizer=tokenizer )
lowerCAmelCase__ :List[str] = [
F"This is another {tokenizer.mask_token} test",
]
return fill_masker, examples
    def run_pipeline_test ( self , fill_masker , examples ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = fill_masker.tokenizer
lowerCAmelCase__ :Tuple = fill_masker.model
lowerCAmelCase__ :List[str] = fill_masker(
F"This is a {tokenizer.mask_token}" , )
self.assertEqual(
__UpperCAmelCase , [
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ :List[Any] = fill_masker([F"This is a {tokenizer.mask_token}"] )
self.assertEqual(
__UpperCAmelCase , [
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ :Optional[Any] = fill_masker([F"This is a {tokenizer.mask_token}", F"Another {tokenizer.mask_token} great test."] )
self.assertEqual(
__UpperCAmelCase , [
[
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
],
[
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
],
] , )
with self.assertRaises(__UpperCAmelCase ):
fill_masker([None] )
        # Inputs that contain no mask_token are not supported
with self.assertRaises(__UpperCAmelCase ):
fill_masker('This is' )
self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase )
self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase )
self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase )
self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase )
    def run_test_targets ( self , model , tokenizer ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = tokenizer.get_vocab()
lowerCAmelCase__ :List[str] = sorted(vocab.keys() )[:2]
# Pipeline argument
lowerCAmelCase__ :Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = fill_masker(F"This is a {tokenizer.mask_token}" )
self.assertEqual(
__UpperCAmelCase , [
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ :str = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , __UpperCAmelCase )
lowerCAmelCase__ :int = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(__UpperCAmelCase ) )
# Call argument
lowerCAmelCase__ :Tuple = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
lowerCAmelCase__ :int = fill_masker(F"This is a {tokenizer.mask_token}" , targets=__UpperCAmelCase )
self.assertEqual(
__UpperCAmelCase , [
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ :int = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , __UpperCAmelCase )
lowerCAmelCase__ :Any = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(__UpperCAmelCase ) )
# Score equivalence
lowerCAmelCase__ :List[str] = fill_masker(F"This is a {tokenizer.mask_token}" , targets=__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = [top_mask['token_str'] for top_mask in outputs]
lowerCAmelCase__ :Optional[int] = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__UpperCAmelCase ) == set(__UpperCAmelCase ):
lowerCAmelCase__ :Optional[int] = fill_masker(F"This is a {tokenizer.mask_token}" , targets=__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) )
# Raises with invalid
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :Union[str, Any] = fill_masker(F"This is a {tokenizer.mask_token}" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :int = fill_masker(F"This is a {tokenizer.mask_token}" , targets=[''] )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :str = fill_masker(F"This is a {tokenizer.mask_token}" , targets='' )
    def run_test_top_k ( self , model , tokenizer ):
'''simple docstring'''
lowerCAmelCase__ :Any = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2 )
lowerCAmelCase__ :List[Any] = fill_masker(F"This is a {tokenizer.mask_token}" )
self.assertEqual(
__UpperCAmelCase , [
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
] , )
lowerCAmelCase__ :Any = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
] , )
self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) )
    def run_test_top_k_targets ( self , model , tokenizer ):
'''simple docstring'''
lowerCAmelCase__ :Any = tokenizer.get_vocab()
lowerCAmelCase__ :Any = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
# top_k=2, ntargets=3
lowerCAmelCase__ :Any = sorted(vocab.keys() )[:3]
lowerCAmelCase__ :List[Any] = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=2 , targets=__UpperCAmelCase )
        # If we use the most probable targets and filter differently, we should still
# have the same results
        lowerCAmelCase__ :Union[str, Any] = [el['token_str'] for el in sorted(__UpperCAmelCase , key=lambda x : x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__UpperCAmelCase ).issubset(__UpperCAmelCase ):
lowerCAmelCase__ :Tuple = fill_masker(F"This is a {tokenizer.mask_token}" , top_k=3 , targets=__UpperCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__UpperCAmelCase ) , nested_simplify(__UpperCAmelCase ) )
    def fill_mask_with_duplicate_targets_and_top_k ( self , model , tokenizer ):
'''simple docstring'''
lowerCAmelCase__ :int = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = tokenizer.get_vocab()
# String duplicates + id duplicates
lowerCAmelCase__ :List[Any] = sorted(vocab.keys() )[:3]
lowerCAmelCase__ :Tuple = [targets[0], targets[1], targets[0], targets[2], targets[1]]
lowerCAmelCase__ :Optional[int] = fill_masker(F"My name is {tokenizer.mask_token}" , targets=__UpperCAmelCase , top_k=1_0 )
        # The target list contains duplicates, so we can't output more
        # results than the number of unique targets
self.assertEqual(len(__UpperCAmelCase ) , 3 )
    def fill_mask_with_multiple_masks ( self , model , tokenizer ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
lowerCAmelCase__ :int = fill_masker(
F"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2 )
self.assertEqual(
__UpperCAmelCase , [
[
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
],
[
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
],
[
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
{'sequence': ANY(__UpperCAmelCase ), 'score': ANY(__UpperCAmelCase ), 'token': ANY(__UpperCAmelCase ), 'token_str': ANY(__UpperCAmelCase )},
],
] , )
| 254 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict (config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) ->str:
    """simple docstring"""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
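# Shape sketch (using the tester defaults below, batch_size=13 and seq_length=7): the
# derived attention_mask is a (13, 7) boolean tensor marking non-pad positions, and each
# default head mask is an all-ones (num_layers, num_heads) tensor, i.e. no pruned heads.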
class MaMaaaModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="relu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs ( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids[:, -1] = self.eos_token_id # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
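        # e.g. with pad_token_id=1, clamp(2) maps [0, 1, 5] -> [2, 2, 5], so no position in
        # the sequence can collide with the pad id.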
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def get_config ( self ):
'''simple docstring'''
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common ( self ):
        '''simple docstring'''
        config , inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs ( self , config , inputs_dict ):
        '''simple docstring'''
        model = MaMaaaModel(config=config ).get_decoder().to(torch_device ).eval()
        input_ids = inputs_dict['input_ids']
        attention_mask = inputs_dict['attention_mask']
        head_mask = inputs_dict['head_mask']
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )['last_hidden_state']
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
            'last_hidden_state'
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-2 ) )
    def check_encoder_decoder_model_standalone ( self , config , inputs_dict ):
        '''simple docstring'''
        model = MaMaaaModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname ).to(torch_device )
            encoder_last_hidden_state_a = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
                0
            ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname ).to(torch_device )
            last_hidden_state_a = decoder(
                input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=encoder_last_hidden_state , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class MaMaaaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Optional[int] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__magic_name__ :str = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__magic_name__ :str = (
{
"""conversational""": MaMaaaForConditionalGeneration,
"""feature-extraction""": MaMaaaModel,
"""summarization""": MaMaaaForConditionalGeneration,
"""text2text-generation""": MaMaaaForConditionalGeneration,
"""translation""": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__magic_name__ :Any = True
__magic_name__ :Union[str, Any] = True
__magic_name__ :Tuple = False
__magic_name__ :List[str] = False
    def is_pipeline_test_to_skip ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp ( self ):
        '''simple docstring'''
        self.model_tester = MaMaaaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaMaaaConfig )
    def test_config ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model , info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
self.assertEqual(info['missing_keys'] , [] )
def snake_case ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
def snake_case ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
def snake_case ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowerCAmelCase__ :Optional[int] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :List[Any] = copy.deepcopy(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
if not self.is_encoder_decoder:
lowerCAmelCase__ :List[str] = inputs['input_ids']
del inputs["input_ids"]
else:
lowerCAmelCase__ :int = inputs['input_ids']
lowerCAmelCase__ :str = inputs.get('decoder_input_ids' , __UpperCAmelCase )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowerCAmelCase__ :Tuple = wte(__UpperCAmelCase )
else:
lowerCAmelCase__ :List[Any] = wte(__UpperCAmelCase )
lowerCAmelCase__ :Dict = wte(__UpperCAmelCase )
with torch.no_grad():
model(**__UpperCAmelCase )[0]
def snake_case ( self ):
'''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        model = MaMaaaForConditionalGeneration(config ).eval().to(torch_device )
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids , attention_mask=attention_mask )
        model.generate(num_beams=4 , do_sample=True , early_stopping=False , num_return_sequences=3 )
def _long_tensor (tok_lst ) ->Dict:
    """simple docstring"""
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(__UpperCAmelCase )
lowerCAmelCase__ :str = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
lowerCAmelCase__ :Optional[int] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
lowerCAmelCase__ :Union[str, Any] = prepare_mam_aaa_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :Any = model(**__UpperCAmelCase )[0]
lowerCAmelCase__ :List[str] = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __UpperCAmelCase )
# change to expected output here
        expected_slice = torch.tensor(
            [[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__UpperCAmelCase )
# change to intended input
lowerCAmelCase__ :str = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
lowerCAmelCase__ :Any = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
lowerCAmelCase__ :List[Any] = prepare_mam_aaa_inputs_dict(model.config , __UpperCAmelCase , __UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase__ :List[Any] = model(**__UpperCAmelCase )[0]
lowerCAmelCase__ :Any = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
# change to expected output here
        expected_slice = torch.tensor(
            [[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
lowerCAmelCase__ :Tuple = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowerCAmelCase__ :Union[str, Any] = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors='pt' )
lowerCAmelCase__ :List[Any] = model.generate(
input_ids=dct['input_ids'].to(__UpperCAmelCase ) , attention_mask=dct['attention_mask'].to(__UpperCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
lowerCAmelCase__ :Optional[Any] = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
lowerCAmelCase__ :Any = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
assert generated == expected_en
| 254 | 1 |
"""simple docstring"""
def euclidean_gcd ( a, b ):
    while b:
        a , b = b, a % b
    return a
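# e.g. euclidean_gcd(48, 18): (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> returns 6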
def euclidean_gcd_recursive ( a, b ):
    return a if b == 0 else euclidean_gcd_recursive(b, a % b )
def main ( ):
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}""" )
if __name__ == "__main__":
main()
| 61 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
def create_rename_keys ( config , base_model=False ):
"""simple docstring"""
a =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a =[(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v ( state_dict , config , base_model=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
a =''''''
else:
a ='''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a =state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
a =state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
a =in_proj_weight[
: config.hidden_size, :
]
a =in_proj_bias[: config.hidden_size]
a =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a =in_proj_weight[
-config.hidden_size :, :
]
a =in_proj_bias[-config.hidden_size :]
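    # timm stores query, key and value as one fused (3 * hidden_size, hidden_size)
    # projection; the slices above split it back into separate q, k and v weights/biases.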
def remove_classification_head_ ( state_dict ):
    """simple docstring"""
    ignore_keys =['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( dct , old , new ):
    """simple docstring"""
    val =dct.pop(old )
    dct[new] =val
def prepare_img ( ):
    """simple docstring"""
    url ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im =Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint ( vit_name , pytorch_dump_folder_path ):
"""simple docstring"""
a =ViTConfig()
a =False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
a =True
a =int(vit_name[-12:-10] )
a =int(vit_name[-9:-6] )
else:
a =10_00
a ='''huggingface/label-files'''
a ='''imagenet-1k-id2label.json'''
a =json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
a ={int(UpperCamelCase__ ): v for k, v in idalabel.items()}
a =idalabel
a ={v: k for k, v in idalabel.items()}
a =int(vit_name[-6:-4] )
a =int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
a =1_92
a =7_68
a =12
a =3
elif vit_name[9:].startswith('''small''' ):
a =3_84
a =15_36
a =12
a =6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
a =7_68
a =23_04
a =8
a =8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
a =10_24
a =40_96
a =24
a =16
elif vit_name[4:].startswith('''huge''' ):
a =12_80
a =51_20
a =32
a =16
# load original model from timm
a =timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
a =timm_model.state_dict()
if base_model:
remove_classification_head_(UpperCamelCase__ )
a =create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
a =ViTModel(UpperCamelCase__ ).eval()
else:
a =ViTForImageClassification(UpperCamelCase__ ).eval()
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
a =DeiTImageProcessor(size=config.image_size )
else:
a =ViTImageProcessor(size=config.image_size )
a =image_processor(images=prepare_img() , return_tensors='''pt''' )
a =encoding['''pixel_values''']
a =model(UpperCamelCase__ )
if base_model:
a =timm_model.forward_features(UpperCamelCase__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(UpperCamelCase__ , outputs.pooler_output , atol=1E-3 )
else:
a =timm_model(UpperCamelCase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase__ , outputs.logits , atol=1E-3 )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
lowerCamelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCamelCase_ : Dict = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 352 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowerCamelCase_ : Any = random.Random()
def floats_list ( shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng =global_rng
    values =[]
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
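# e.g. floats_list((2, 3)) returns a 2 x 3 nested list of random floats in [0.0, scale)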
@require_torch
@require_torchaudio
class __A ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
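        # Derived sizes (following WhisperFeatureExtractor's conventions): n_samples =
        # chunk_length * sampling_rate = 8 * 4000 = 32000 raw samples per padded example,
        # and nb_max_frames = n_samples // hop_length = 200 mel frames.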
    def prepare_feat_extract_dict ( self ) -> str:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common ( self , equal_length=False , numpify=False ) -> str:
def _flatten(__A ):
return list(itertools.chain(*__A ) )
if equal_length:
a =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a =[np.asarray(__A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest ( SequenceFeatureExtractionTestMixin, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = WhisperFeatureExtractor if is_speech_available() else None
    def setUp ( self ) -> Dict:
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a =feat_extract_first.save_pretrained(__A )[0]
check_json_file_has_correct_format(__A )
a =self.feature_extraction_class.from_pretrained(__A )
a =feat_extract_first.to_dict()
a =feat_extract_second.to_dict()
a =feat_extract_first.mel_filters
a =feat_extract_second.mel_filters
self.assertTrue(np.allclose(__A , __A ) )
self.assertEqual(__A , __A )
def SCREAMING_SNAKE_CASE ( self ) -> str:
a =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a =os.path.join(__A , '''feat_extract.json''' )
feat_extract_first.to_json_file(__A )
a =self.feature_extraction_class.from_json_file(__A )
a =feat_extract_first.to_dict()
a =feat_extract_second.to_dict()
a =feat_extract_first.mel_filters
a =feat_extract_second.mel_filters
self.assertTrue(np.allclose(__A , __A ) )
self.assertEqual(__A , __A )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# Tests that all call wrap to encode_plus and batch_encode_plus
a =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a =[np.asarray(__A ) for speech_input in speech_inputs]
# Test feature size
a =feature_extractor(__A , padding='''max_length''' , return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
a =feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
a =feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(__A , __A , atol=1E-3 ) )
# Test batched
a =feature_extractor(__A , return_tensors='''np''' ).input_features
a =feature_extractor(__A , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(__A , __A ):
self.assertTrue(np.allclose(__A , __A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a =[floats_list((1, x) )[0] for x in (800, 800, 800)]
a =np.asarray(__A )
a =feature_extractor(__A , return_tensors='''np''' ).input_features
a =feature_extractor(__A , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(__A , __A ):
self.assertTrue(np.allclose(__A , __A , atol=1E-3 ) )
# Test truncation required
a =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
a =[np.asarray(__A ) for speech_input in speech_inputs]
a =[x[: feature_extractor.n_samples] for x in speech_inputs]
a =[np.asarray(__A ) for speech_input in speech_inputs_truncated]
a =feature_extractor(__A , return_tensors='''np''' ).input_features
a =feature_extractor(__A , return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(__A , __A ):
self.assertTrue(np.allclose(__A , __A , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
import torch
        feature_extractor =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs =np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs =np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed =feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed =feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples ( self , num_samples ) -> Dict:
        ds =load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        speech_samples =ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE ( self ) -> Any:
# fmt: off
a =torch.tensor(
[
0.1_193, -0.0_946, -0.1_098, -0.0_196, 0.0_225, -0.0_690, -0.1_736, 0.0_951,
0.0_971, -0.0_817, -0.0_702, 0.0_162, 0.0_260, 0.0_017, -0.0_192, -0.1_678,
0.0_709, -0.1_867, -0.0_655, -0.0_274, -0.0_234, -0.1_884, -0.0_516, -0.0_554,
-0.0_274, -0.1_425, -0.1_423, 0.0_837, 0.0_377, -0.0_854
] )
# fmt: on
a =self._load_datasamples(1 )
a =WhisperFeatureExtractor()
a =feature_extractor(__A , return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , __A , atol=1E-4 ) )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a =self._load_datasamples(1 )[0]
a =((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535 # Rescale to [0, 65535] to show issue
a =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__A )[0]
self.assertTrue(np.all(np.mean(__A ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(__A ) - 1 ) < 1E-3 ) )
| 215 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters ( model ):
    """simple docstring"""
    model_parameters =filter(lambda p : p.requires_grad , model.parameters() )
    params =sum([np.prod(p.size() ) for p in model_parameters] )
    return params
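# e.g. a model whose only trainable tensors are two (3, 4) weights reports 2 * 12 = 24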
_lowercase : Union[str, Any] = logging.getLogger(__name__)
def get_checkpoint_callback ( output_dir , metric ):
    """simple docstring"""
    if metric == "rouge2":
        exp ='''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        exp ='''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        exp ='''{val_avg_em:.4f}-{step_count}'''
    elif metric == "loss":
        exp ='''{val_avg_loss:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this'''
            ''' function.''' )
    checkpoint_callback =ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'''val_{metric}''' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback ( metric , patience ):
    """simple docstring"""
    return EarlyStopping(
        monitor=f'''val_{metric}''' , mode='''min''' if '''loss''' in metric else '''max''' , patience=patience , verbose=True , )
class __SCREAMING_SNAKE_CASE ( pl.Callback ):
'''simple docstring'''
    def on_batch_end( self, trainer, pl_module )-> Union[str, Any]:
        lrs ={F'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs( self, trainer : pl.Trainer, pl_module : pl.LightningModule, type_path : str, save_generations=True )-> None:
        logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
        metrics =trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
        # Log results
        od =Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file =od / '''test_results.txt'''
            generations_file =od / '''test_generations.txt'''
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file =od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
            generations_file =od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file, '''a+''' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val =metrics[key]
                if isinstance(val, torch.Tensor ):
                    val =val.item()
                msg =F'''{key}: {val:.6f}\n'''
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content ='''\n'''.join(metrics['''preds'''] )
            generations_file.open('''w+''' ).write(content )
@rank_zero_only
def snake_case ( self : str, lowerCamelCase : int, lowerCamelCase : List[Any] )-> Any:
try:
lowerCamelCase__ : int =pl_module.model.model.num_parameters()
except AttributeError:
lowerCamelCase__ : Tuple =pl_module.model.num_parameters()
lowerCamelCase__ : int =count_trainable_parameters(SCREAMING_SNAKE_CASE__ )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )
@rank_zero_only
def snake_case ( self : Union[str, Any], lowerCamelCase : pl.Trainer, lowerCamelCase : pl.LightningModule )-> List[str]:
save_json(pl_module.metrics, pl_module.metrics_save_path )
return self._write_logs(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, '''test''' )
@rank_zero_only
def snake_case ( self : Optional[int], lowerCamelCase : pl.Trainer, lowerCamelCase : List[str] )-> Dict:
save_json(pl_module.metrics, pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 238 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
    __lowerCamelCase = prime_factors(__lowerCAmelCase )
    if is_square_free(__lowerCamelCase ):
        return -1 if len(__lowerCamelCase ) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
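    # Spot checks of the Möbius function above (illustrative additions):
    # mu(n) = 0 when n is not square-free, else (-1)**k for k prime factors.
    assert __magic_name__(4) == 0    # 4 = 2 * 2 is not square-free
    assert __magic_name__(10) == 1   # 10 = 2 * 5, an even number of prime factors
    assert __magic_name__(30) == -1  # 30 = 2 * 3 * 5, an odd number of prime factors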
| 270 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_gpt_neox_fast"""] = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_gpt_neox"""] = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
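# The guarded imports above register submodules lazily: nothing heavy is
# imported until an attribute is first accessed. A minimal self-contained
# sketch of the same idea (illustrative only; the real _LazyModule also
# tracks module specs and extra objects):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
                setattr(self, attr, value)  # cache so later lookups skip __getattr__
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")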
| 128 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 128 | 1 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_cpmant'] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 65 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters ( _snake_case ):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in _snake_case.items() )
def upgrade_state_dict ( state_dict ,codebook_state_dict ):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" )
        key = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" )
        key = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" )
        key = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" )
        key = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" )
        key = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" )
        key = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" )
        key = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" )
        key = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" )
        key = key.replace("""image_encoder.module""" ,"""flava.image_model""" )
        key = key.replace("""text_encoder.module""" ,"""flava.text_model""" )
        key = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" )
        key = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" )
        key = key.replace("""text_projection""" ,"""flava.text_projection""" )
        key = key.replace("""image_projection""" ,"""flava.image_projection""" )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"""image_codebook.{key}"""] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint ( checkpoint_path ,codebook_path ,pytorch_dump_folder_path ,config_path=None ):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path ,None ,save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path ,map_location="""cpu""" )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path ,map_location="""cpu""" )
    hf_state_dict = upgrade_state_dict(state_dict ,codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count ,state_dict_count ,atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 25 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__a: int = logging.get_logger(__name__)
PATTERNS = [
["""attention""", """attn"""],
["""encoder_attention""", """encoder_attn"""],
["""q_lin""", """q_proj"""],
["""k_lin""", """k_proj"""],
["""v_lin""", """v_proj"""],
["""out_lin""", """out_proj"""],
["""norm_embeddings""", """layernorm_embedding"""],
["""position_embeddings""", """embed_positions"""],
["""embeddings""", """embed_tokens"""],
["""ffn.lin""", """fc"""],
]
def rename_state_dict_key ( k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith('''encoder''' ):
        k = k.replace('''.attn''' , '''.self_attn''' )
        k = k.replace('''norm1''' , '''self_attn_layer_norm''' )
        k = k.replace('''norm2''' , '''final_layer_norm''' )
    elif k.startswith('''decoder''' ):
        k = k.replace('''norm1''' , '''self_attn_layer_norm''' )
        k = k.replace('''norm2''' , '''encoder_attn_layer_norm''' )
        k = k.replace('''norm3''' , '''final_layer_norm''' )
    return k
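# Worked example for the renaming logic above (illustrative):
#   rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
# first applies the PATTERNS table ("attention" -> "attn", "q_lin" -> "q_proj"),
# then the encoder-specific ".attn" -> ".self_attn" rewrite, yielding
#   "encoder.layers.0.self_attn.q_proj.weight"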
def rename_layernorm_keys ( sd ):
    keys = [
        '''model.encoder.layernorm_embedding.weight''',
        '''model.encoder.layernorm_embedding.bias''',
        '''model.decoder.layernorm_embedding.weight''',
        '''model.decoder.layernorm_embedding.bias''',
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace('''layernorm_embedding''' , '''layer_norm''' )
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["""START"""]
@torch.no_grad()
def convert_parlai_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location='''cpu''' )
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 214 | '''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = OpenAIGPTTokenizer
SCREAMING_SNAKE_CASE = OpenAIGPTTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase( self ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Optional[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowercase__ : Optional[int] = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowercase__ : Union[str, Any] = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
lowercase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Dict:
return "lower newer", "lower newer"
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : int = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowercase__ : List[Any] = '''lower'''
lowercase__ : Any = ['''low''', '''er</w>''']
lowercase__ : Union[str, Any] = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowercase__ : Tuple = tokens + ['''<unk>''']
lowercase__ : Optional[Any] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase=15 ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : List[str] = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
# Simple input
lowercase__ : List[str] = '''This is a simple input'''
lowercase__ : Any = ['''This is a simple input 1''', '''This is a simple input 2''']
lowercase__ : str = ('''This is a simple input''', '''This is a pair''')
lowercase__ : str = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' )
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding='''max_length''' , )
def _lowerCAmelCase( self ) -> Tuple:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCAmelCase ( a__ ):
'''simple docstring'''
pass
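# A minimal sketch of the greedy BPE merge loop that the tokenizer above
# relies on (illustrative only; the real tokenizer adds caching, special
# tokens and regex pre-tokenization). `merges` is the ranked merge list
# from merges.txt, best rank first.
def bpe_merge_sketch(word, merges):
    ranks = {pair: i for i, pair in enumerate(merges)}
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = [(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)]
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

# e.g. bpe_merge_sketch("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")])
# returns ["low", "er</w>"], matching the expected tokens in the test above.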
| 214 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path : str , config_file : str , pytorch_dump_path : str ):
    """simple docstring"""
    config = LxmertConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 254 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
FRAMEWORK = '''pt'''
elif is_tf_available():
FRAMEWORK = '''tf'''
else:
FRAMEWORK = '''jax'''
class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Dict = ByTaTokenizer
_SCREAMING_SNAKE_CASE : List[Any] = False
def __A ( self ) -> int:
'''simple docstring'''
super().setUp()
__UpperCAmelCase : Tuple = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return ByTaTokenizer.from_pretrained("""google/byt5-small""" )
def __A ( self , **__UpperCAmelCase ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=20 , __UpperCAmelCase=5 ) -> Tuple[str, list]:
'''simple docstring'''
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__UpperCAmelCase : Optional[Any] = []
for i in range(len(__UpperCAmelCase ) ):
try:
__UpperCAmelCase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=__UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__UpperCAmelCase : List[Any] = list(filter(lambda __UpperCAmelCase : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , __UpperCAmelCase ) )
__UpperCAmelCase : List[Any] = list(filter(lambda __UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__UpperCAmelCase ) , __UpperCAmelCase ) )
if max_length is not None and len(__UpperCAmelCase ) > max_length:
__UpperCAmelCase : Dict = toks[:max_length]
if min_length is not None and len(__UpperCAmelCase ) < min_length and len(__UpperCAmelCase ) > 0:
while len(__UpperCAmelCase ) < min_length:
__UpperCAmelCase : Dict = toks + toks
# toks_str = [t[1] for t in toks]
__UpperCAmelCase : Tuple = [t[0] for t in toks]
# Ensure consistency
__UpperCAmelCase : Union[str, Any] = tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase )
if " " not in output_txt and len(__UpperCAmelCase ) > 1:
__UpperCAmelCase : Dict = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__UpperCAmelCase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__UpperCAmelCase )
)
if with_prefix_space:
__UpperCAmelCase : List[Any] = """ """ + output_txt
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
return output_txt, output_ids
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : List[str] = self.ta_base_tokenizer
__UpperCAmelCase : Optional[int] = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] )
__UpperCAmelCase : List[str] = tokenizer(["""hi""", """I went to the gym""", """"""] )
self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.ta_base_tokenizer
__UpperCAmelCase : List[Any] = """Unicode €."""
__UpperCAmelCase : Dict = tokenizer(__UpperCAmelCase )
__UpperCAmelCase : Tuple = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["""input_ids"""] , __UpperCAmelCase )
# decoding
__UpperCAmelCase : List[Any] = tokenizer.decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , """Unicode €.</s>""" )
__UpperCAmelCase : Dict = tokenizer("""e è é ê ë""" )
__UpperCAmelCase : List[str] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["""input_ids"""] , __UpperCAmelCase )
# decoding
__UpperCAmelCase : Union[str, Any] = tokenizer.decode(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , """e è é ê ë</s>""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" )
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Dict = self.ta_base_tokenizer
__UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
__UpperCAmelCase : Optional[int] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
__UpperCAmelCase : Any = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
if FRAMEWORK != "jax":
__UpperCAmelCase : List[str] = list(batch.input_ids.numpy()[0] )
else:
__UpperCAmelCase : Tuple = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.ta_base_tokenizer
__UpperCAmelCase : Optional[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCAmelCase : Tuple = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , __UpperCAmelCase )
self.assertIn("""attention_mask""" , __UpperCAmelCase )
self.assertNotIn("""decoder_input_ids""" , __UpperCAmelCase )
self.assertNotIn("""decoder_attention_mask""" , __UpperCAmelCase )
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.ta_base_tokenizer
__UpperCAmelCase : Any = [
"""Summary of the text.""",
"""Another summary.""",
]
__UpperCAmelCase : List[str] = tokenizer(
text_target=__UpperCAmelCase , max_length=32 , padding="""max_length""" , truncation=__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.ta_base_tokenizer
__UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization. </s>"""]
__UpperCAmelCase : Tuple = ["""Summary of the text. </s>"""]
# fmt: off
__UpperCAmelCase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
__UpperCAmelCase : List[str] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
__UpperCAmelCase : Optional[int] = tokenizer(__UpperCAmelCase , text_target=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , batch["""input_ids"""][0] )
self.assertEqual(__UpperCAmelCase , batch["""labels"""][0] )
def __A ( self ) -> List[str]:
'''simple docstring'''
# safety check on max_len default value so we are sure the test works
__UpperCAmelCase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCAmelCase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : Any = tempfile.mkdtemp()
__UpperCAmelCase : Any = """ He is very happy, UNwant\u00E9d,running"""
__UpperCAmelCase : Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
tokenizer.save_pretrained(__UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(__UpperCAmelCase )
__UpperCAmelCase : List[Any] = after_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
shutil.rmtree(__UpperCAmelCase )
__UpperCAmelCase : Optional[int] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCAmelCase : str = tempfile.mkdtemp()
__UpperCAmelCase : Dict = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
__UpperCAmelCase : int = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__UpperCAmelCase : str = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
tokenizer.save_pretrained(__UpperCAmelCase )
__UpperCAmelCase : Tuple = tokenizer.__class__.from_pretrained(__UpperCAmelCase )
__UpperCAmelCase : Tuple = after_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCAmelCase : Any = tokenizer.__class__.from_pretrained(__UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__UpperCAmelCase )
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__UpperCAmelCase : Optional[Any] = json.load(__UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__UpperCAmelCase : Optional[int] = json.load(__UpperCAmelCase )
__UpperCAmelCase : Any = [f'<extra_id_{i}>' for i in range(125 )]
__UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
__UpperCAmelCase : Optional[Any] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(__UpperCAmelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCAmelCase : int = tokenizer_class.from_pretrained(
__UpperCAmelCase , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCAmelCase : int = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=__UpperCAmelCase )]
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained(
__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__UpperCAmelCase )
__UpperCAmelCase : Any = tokenizer_class.from_pretrained(__UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == """""" )
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
def __A ( self ) -> str:
'''simple docstring'''
pass
def __A ( self ) -> List[str]:
'''simple docstring'''
pass
def __A ( self ) -> str:
'''simple docstring'''
pass
def __A ( self ) -> Any:
'''simple docstring'''
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
__UpperCAmelCase : Tuple = self.get_tokenizers(fast=__UpperCAmelCase , do_lower_case=__UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCAmelCase : Optional[int] = ["""t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """x""", """t""", """</s>"""]
__UpperCAmelCase : List[str] = tokenizer.convert_tokens_to_string(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCAmelCase : List[str] = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(
__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
for attr in attributes_list:
setattr(__UpperCAmelCase , attr + """_id""" , __UpperCAmelCase )
self.assertEqual(getattr(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(getattr(__UpperCAmelCase , attr + """_id""" ) , __UpperCAmelCase )
setattr(__UpperCAmelCase , attr + """_id""" , __UpperCAmelCase )
self.assertEqual(getattr(__UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(getattr(__UpperCAmelCase , attr + """_id""" ) , __UpperCAmelCase )
setattr(__UpperCAmelCase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(__UpperCAmelCase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(__UpperCAmelCase , """additional_special_tokens_ids""" ) , [] )
setattr(__UpperCAmelCase , """additional_special_tokens_ids""" , [token_id_to_test_setters] )
self.assertListEqual(getattr(__UpperCAmelCase , """additional_special_tokens""" ) , [token_to_test_setters] )
self.assertListEqual(getattr(__UpperCAmelCase , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
| 254 | 1 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter ( year : int ) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
tense = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
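# Spot checks against known Western Easter dates (illustrative additions,
# not part of the original module):
assert gauss_easter(2000) == datetime(2000, 4, 23)
assert gauss_easter(2023) == datetime(2023, 4, 9)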
| 126 | """simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __A ( SCREAMING_SNAKE_CASE_ ):
    def __init__( self , df , split = None , features = None , streaming = True , cache_dir = None , keep_in_memory = False , working_dir = None , load_from_cache_file = True , file_format = "arrow" , **kwargs , ):
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )

    def read( self ):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split )
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode , file_format=self._file_format , )
        return self.builder.as_dataset(split=self.split )
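# A hedged usage sketch for the Spark reader above, assuming a live
# SparkSession (in the public `datasets` API the equivalent entry point is
# Dataset.from_spark; the DataFrame below is an illustrative assumption):
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
#   ds = __A(df, streaming=False).read()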
| 126 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =StableUnCLIPImgaImgPipeline
__a =TEXT_GUIDED_IMAGE_VARIATION_PARAMS
__a =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__a =frozenset([] )
def UpperCamelCase__ ( self : Dict ):
_a = 32
_a = embedder_hidden_size
# image encoding components
_a = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
_a = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__a , projection_dim=__a , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
_a = StableUnCLIPImageNormalizer(embedding_dim=__a )
_a = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
_a = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
_a = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__a , layers_per_block=1 , upcast_attention=__a , use_linear_projection=__a , )
torch.manual_seed(0 )
_a = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__a , steps_offset=1 , )
torch.manual_seed(0 )
_a = AutoencoderKL()
_a = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def UpperCamelCase__ ( self : Optional[Any] , __a : Optional[Any] , __a : List[Any]=0 , __a : Dict=True ):
if str(__a ).startswith("mps" ):
_a = torch.manual_seed(__a )
else:
_a = torch.Generator(device=__a ).manual_seed(__a )
_a = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
if pil_image:
_a = input_image * 0.5 + 0.5
_a = input_image.clamp(0 , 1 )
_a = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_a = DiffusionPipeline.numpy_to_pil(__a )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCamelCase__ ( self : Optional[Any] ):
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
_a = StableUnCLIPImgaImgPipeline(**__a )
_a = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
_a = self.get_dummy_inputs(__a )
inputs.update({"image_embeds": None} )
_a = sd_pipe(**__a ).images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase__ ( self : List[Any] ):
_a = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=__a )
def UpperCamelCase__ ( self : Any ):
_a = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=__a )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self : Union[str, Any] ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__a )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self : str ):
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
_a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_a = torch.Generator(device="cpu" ).manual_seed(0 )
_a = pipe(__a , "anime turle" , generator=__a , output_type="np" )
_a = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
_a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_a = torch.Generator(device="cpu" ).manual_seed(0 )
_a = pipe(__a , "anime turle" , generator=__a , output_type="np" )
_a = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__a , __a )
def UpperCamelCase__ ( self : Any ):
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_a = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
_a = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
_a = pipe(
__a , "anime turtle" , num_inference_steps=2 , output_type="np" , )
_a = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 63 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 215 | 0 |
import numpy as np
SQUARE = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class __snake_case :
    def __init__( self ) -> None:
        '''simple docstring'''
        self.SQUARE = np.array(SQUARE )

    def letter_to_numbers( self , letter ) -> np.ndarray:
        '''simple docstring'''
        index1 , index2 = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index1 + 1, index2 + 1] )
        return indexes

    def numbers_to_letter( self , index1 , index2 ) -> str:
        '''simple docstring'''
        return self.SQUARE[index1 - 1, index2 - 1]

    def encode( self , message ) -> str:
        '''simple docstring'''
        message = message.lower()
        message = message.replace(""" """ , """""" )
        message = message.replace("""j""" , """i""" )
        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = """"""
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[numbers_index * 2] )
            index2 = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index1 , index2 )
            encoded_message = encoded_message + letter
        return encoded_message

    def decode( self , message ) -> str:
        '''simple docstring'''
        message = message.lower()
        message = message.replace(""" """ , """""" )
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message )) )
        decoded_message = """"""
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[0, numbers_index] )
            index2 = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index1 , index2 )
            decoded_message = decoded_message + letter
        return decoded_message
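# Quick round-trip check for the Bifid/Polybius cipher above (an
# illustrative addition; "testmessage" is an arbitrary sample, and the
# square folds "j" into "i" and strips spaces, so inputs containing either
# will not round-trip verbatim):
if __name__ == "__main__":
    cipher = __snake_case()
    assert cipher.decode(cipher.encode("testmessage")) == "testmessage"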
| 159 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray (arr : Sequence[float] , low : int , high : int ):
    '''simple docstring'''
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low , left_high , left_sum = max_subarray(arr , low , mid )
    right_low , right_high , right_sum = max_subarray(arr , mid + 1 , high )
    cross_left , cross_right , cross_sum = max_cross_sum(arr , low , mid , high )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def max_cross_sum (arr : Sequence[float] , low : int , mid : int , high : int ):
    '''simple docstring'''
    left_sum , max_left = float("""-inf""" ), -1
    right_sum , max_right = float("""-inf""" ), -1
    summ: int | float = 0
    for i in range(mid , low - 1 , -1 ):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    summ = 0
    for i in range(mid + 1 , high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def time_max_subarray (input_size : int ) -> float:
    '''simple docstring'''
    arr = [randint(1 , input_size ) for _ in range(input_size )]
    start = time.time()
    max_subarray(arr , 0 , input_size - 1 )
    end = time.time()
    return end - start
def plot_runtimes () -> None:
    '''simple docstring'''
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    runtimes = [time_max_subarray(input_size ) for input_size in input_sizes]
    print("""No of Inputs\t\tTime Taken""" )
    for input_size, runtime in zip(input_sizes , runtimes ):
        print(input_size , """\t\t""" , runtime )
    plt.plot(input_sizes , runtimes )
    plt.xlabel("""Number of Inputs""" )
    plt.ylabel("""Time taken in seconds""" )
    plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
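    # Worked example on the classic CLRS array (an illustrative addition):
    # the maximum subarray is arr[7..10] = [18, 20, -7, 12], which sums to 43.
    _demo = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
    assert max_subarray(_demo, 0, len(_demo) - 1) == (7, 10, 43)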
| 159 | 1 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , snake_case__ , snake_case__=100 , snake_case__=13 , snake_case__=30 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=4 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , snake_case__=3 , snake_case__=None , snake_case__=[0, 1, 2, 3] , ):
'''simple docstring'''
UpperCamelCase_ = parent
UpperCamelCase_ = 100
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = scope
UpperCamelCase_ = out_indices
UpperCamelCase_ = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase_ = (image_size // patch_size) ** 2
UpperCamelCase_ = num_patches + 1
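# Worked example with the signature defaults above: image_size=30 and
# patch_size=2 give (30 // 2) ** 2 = 225 patches, so the sequence length
# is 225 + 1 = 226 once the [CLS] token is added.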
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCamelCase ( self ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase_ = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase_ = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = self.type_sequence_label_size
UpperCamelCase_ = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase_ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase_ = 1
UpperCamelCase_ = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase_ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCamelCase_ = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCamelCase_ = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
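
# Illustrative helper (added for exposition; not part of the original test file): BeiT splits
# the image into non-overlapping patches, so the transformer sequence length is the patch count
# plus one [CLS] token, mirroring the arithmetic in BeitModelTester above.
def _beit_seq_length(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1  # e.g. _beit_seq_length(224, 16) == 197
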
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 128 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
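
# Illustrative sketch (added for exposition; `_expected_num_shards` is a hypothetical helper,
# not part of `datasets`): `_repartition_df_if_needed` targets roughly
# ceil(total_bytes / max_shard_size) partitions, which is where the 50 above comes from
# (100 rows * 8 bytes / 16 bytes per shard).
def _expected_num_shards(num_rows: int, row_size_bytes: int, max_shard_size: int) -> int:
    import math

    return math.ceil(num_rows * row_size_bytes / max_shard_size)  # _expected_num_shards(100, 8, 16) == 50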
| 128 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
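
# Illustrative note (added for exposition, not part of the original test file): unlike BERT's
# 0/1 segment ids, Funnel marks the [CLS] position with token_type_id 2, so an encoded pair is
# [2] + [0] * len(first segment) + [1] * len(second segment), exactly what the test above asserts.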
| 351 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 40 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
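
# Illustrative usage (added for exposition): with the default scale_factor=8 this converts a
# requested pixel size into the latent grid size, rounding up when the size is not divisible by
# scale_factor**2, e.g. downscale_height_and_width(768, 768) == (96, 96) and
# downscale_height_and_width(100, 100) == (16, 16).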
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
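
# Illustrative check (added for exposition): the scaling above maps 8-bit pixel values into the
# [-1, 1] range the movq encoder expects: 0 / 127.5 - 1 == -1.0 and 255 / 127.5 - 1 == 1.0.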
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler, movq):
        super().__init__()

        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)

        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
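
    # Illustrative example (added for exposition): with num_inference_steps=100 and strength=0.2,
    # init_timestep == 20 and t_start == 80, so only the last 20 scheduler timesteps run --
    # higher strength adds more noise and applies more denoising steps to the input image.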
def snake_case_ ( self , a , a , a , a , a , a , a=None):
if not isinstance(a , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a)}""")
lowercase__ : Optional[Any] = image.to(device=a , dtype=a)
lowercase__ : Tuple = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase__ : Dict = image
else:
if isinstance(a , a) and len(a) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a)}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
elif isinstance(a , a):
lowercase__ : Tuple = [
self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(a)
]
lowercase__ : Optional[int] = torch.cat(a , dim=0)
else:
lowercase__ : Union[str, Any] = self.movq.encode(a).latent_dist.sample(a)
lowercase__ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase__ : Union[str, Any] = torch.cat([init_latents] , dim=0)
lowercase__ : Union[str, Any] = init_latents.shape
lowercase__ : Tuple = randn_tensor(a , generator=a , device=a , dtype=a)
# get latents
lowercase__ : Optional[Any] = self.scheduler.add_noise(a , a , a)
lowercase__ : Union[str, Any] = init_latents
return latents
def snake_case_ ( self , a=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`')
lowercase__ : Dict = torch.device(f"""cuda:{gpu_id}""")
lowercase__ : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a)
def snake_case_ ( self , a=0):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0'):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
lowercase__ : Optional[int] = torch.device(f"""cuda:{gpu_id}""")
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=a)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ : Optional[Any] = cpu_offload_with_hook(a , a , prev_module_hook=a)
# We'll offload the last model manually.
lowercase__ : str = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case_ ( self):
if not hasattr(self.unet , '_hf_hook'):
return self.device
for module in self.unet.modules():
if (
hasattr(a , '_hf_hook')
and hasattr(module._hf_hook , 'execution_device')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
lowercase__ : Any = self._execution_device
lowercase__ : str = guidance_scale > 1.0
if isinstance(a , a):
lowercase__ : List[str] = torch.cat(a , dim=0)
lowercase__ : str = image_embeds.shape[0]
if isinstance(a , a):
lowercase__ : Optional[int] = torch.cat(a , dim=0)
if do_classifier_free_guidance:
lowercase__ : Optional[int] = image_embeds.repeat_interleave(a , dim=0)
lowercase__ : Tuple = negative_image_embeds.repeat_interleave(a , dim=0)
lowercase__ : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=a)
if not isinstance(a , a):
lowercase__ : str = [image]
if not all(isinstance(a , (PIL.Image.Image, torch.Tensor)) for i in image):
raise ValueError(
f"""Input is in incorrect format: {[type(a) for i in image]}. Currently, we only support PIL image and pytorch tensor""")
lowercase__ : List[Any] = torch.cat([prepare_image(a , a , a) for i in image] , dim=0)
lowercase__ : Tuple = image.to(dtype=image_embeds.dtype , device=a)
lowercase__ : str = self.movq.encode(a)['latents']
lowercase__ : Optional[int] = latents.repeat_interleave(a , dim=0)
self.scheduler.set_timesteps(a , device=a)
lowercase__ , lowercase__ : str = self.get_timesteps(a , a , a)
lowercase__ : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt)
lowercase__ , lowercase__ : Any = downscale_height_and_width(a , a , self.movq_scale_factor)
lowercase__ : Optional[Any] = self.prepare_latents(
a , a , a , a , image_embeds.dtype , a , a)
for i, t in enumerate(self.progress_bar(a)):
# expand the latents if we are doing classifier free guidance
lowercase__ : int = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowercase__ : int = {'image_embeds': image_embeds}
lowercase__ : int = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ : Any = noise_pred.split(latents.shape[1] , dim=1)
lowercase__ , lowercase__ : Optional[Any] = noise_pred.chunk(2)
lowercase__ , lowercase__ : Union[str, Any] = variance_pred.chunk(2)
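                # classifier-free guidance: move from the unconditional prediction toward the
                # image-conditioned one, scaled by guidance_scale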
lowercase__ : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , 'variance_type')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ : Optional[int] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : int = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
lowercase__ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a)['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
if output_type in ["np", "pil"]:
lowercase__ : Union[str, Any] = image * 0.5 + 0.5
lowercase__ : int = image.clamp(0 , 1)
lowercase__ : str = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
lowercase__ : str = self.numpy_to_pil(a)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a)
| 214 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 214 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 285 |
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 285 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            # keep only the most recent tokens once the running conversation exceeds model_max_length
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 126 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )
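
        # e.g. (illustrative) with the tester defaults image_size=32 and hidden_sizes[-1]=40,
        # the expected shape is (batch_size, 40, 1, 1), since the backbone stride of 32 gives 32 // 32 == 1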
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =ConvNextVaForImageClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Optional[int] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =ConvNextVaBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Any =model(lowerCamelCase_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase__ : Union[str, Any] =None
lowerCamelCase__ : Dict =ConvNextVaBackbone(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : List[str] =model(lowerCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =config_and_inputs
lowerCamelCase__ : Dict ={'pixel_values': pixel_values}
return config, inputs_dict
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : str =self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =config_and_inputs
lowerCamelCase__ : Optional[int] ={'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class A_ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
lowerCamelCase__ : int =ConvNextVaModelTester(self )
lowerCamelCase__ : Tuple =ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
pass
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_with_labels()
lowerCamelCase__ : List[str] =True
if model_class.__name__ in [
*get_values(lowerCamelCase_ ),
*get_values(lowerCamelCase_ ),
]:
continue
lowerCamelCase__ : Any =model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
lowerCamelCase__ : Union[str, Any] =self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowerCamelCase__ : Dict =model(**lowerCamelCase_ ).loss
loss.backward()
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_with_labels()
lowerCamelCase__ : Optional[Any] =False
lowerCamelCase__ : Optional[Any] =True
if (
model_class.__name__
in [*get_values(lowerCamelCase_ ), *get_values(lowerCamelCase_ )]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCamelCase__ : List[str] =model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.gradient_checkpointing_enable()
model.train()
lowerCamelCase__ : List[Any] =self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
lowerCamelCase__ : Dict =model(**lowerCamelCase_ ).loss
loss.backward()
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[Any] =model_class(lowerCamelCase_ )
lowerCamelCase__ : Dict =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Union[str, Any] =[*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] =['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase_ :List[str] , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] ):
lowerCamelCase__ : Tuple =model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Optional[int] =model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
lowerCamelCase__ : int =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__ : Union[str, Any] =self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase__ , lowerCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : str =True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : str =True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@slow
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[int] =ConvNextVaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCAmelCase_ ( ) ->List[str]:
lowerCamelCase__ : Tuple =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 126 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform):
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0):
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
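
    # Illustrative note (added for exposition): utterance-level CMVN standardizes each mel bin d
    # over the valid frames of a single utterance, x[t, d] <- (x[t, d] - mean_d) / std_d, so the
    # features are zero-mean (and unit-variance when normalize_vars=True) before padding.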
def __call__( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
_a = isinstance(UpperCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
_a = is_batched_numpy or (
isinstance(UpperCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_a = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase__ , np.ndarray ):
_a = np.asarray(UpperCAmelCase__ , dtype=np.floataa )
elif isinstance(UpperCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_a = [raw_speech]
# extract fbank features
_a = [self._extract_fbank_features(UpperCAmelCase__ ) for waveform in raw_speech]
# convert into correct format for padding
_a = BatchFeature({'''input_features''': features} )
_a = self.pad(
UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ , pad_to_multiple_of=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
# make sure list is in array format
_a = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , UpperCAmelCase__ ):
_a = [np.asarray(UpperCAmelCase__ , dtype=np.floataa ) for feature in input_features]
_a = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
_a = [np.asarray(UpperCAmelCase__ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
_a = (
np.array(UpperCAmelCase__ , dtype=np.intaa )
if self._get_padding_strategies(UpperCAmelCase__ , max_length=UpperCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
_a = self.normalize(
padded_inputs['''input_features'''] , attention_mask=UpperCAmelCase__ )
if return_tensors is not None:
_a = padded_inputs.convert_to_tensors(UpperCAmelCase__ )
        return padded_inputs
| 361 |
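# A minimal NumPy sketch of the utterance-level cepstral mean/variance
# normalization applied above; the readable names and the epsilon guard
# are illustrative additions, not the library's API.
import numpy as np
def utterance_cmvn_sketch(x, input_length, padding_value=0.0):
    x = x.astype(np.float32)
    x = x - x[:input_length].mean(axis=0)  # zero mean per feature dim over un-padded frames
    x = x / (x[:input_length].std(axis=0) + 1e-10)  # unit variance; epsilon avoids division by zero
    x[input_length:] = padding_value  # re-apply padding after normalization
    return x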
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__snake_case = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : List[str]=None, _lowerCAmelCase : Optional[Any]=None, _lowerCAmelCase : Union[str, Any]=None ):
"""simple docstring"""
_a = True
while ask_again:
_a = input(_lowerCAmelCase )
try:
if default is not None and len(_lowerCAmelCase ) == 0:
return default
return convert_value(_lowerCAmelCase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : Optional[Any]=[], _lowerCAmelCase : Tuple=None, _lowerCAmelCase : Dict=0 ):
"""simple docstring"""
_a = BulletMenu(_lowerCAmelCase, _lowerCAmelCase )
_a = menu.run(default_choice=_lowerCAmelCase )
return convert_value(_lowerCAmelCase ) if convert_value is not None else result
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_a = int(_lowerCAmelCase )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_a = int(_lowerCAmelCase )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_a = int(_lowerCAmelCase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_a = int(_lowerCAmelCase )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def A_ ( _lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_a = int(_lowerCAmelCase )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def A_ ( _lowerCAmelCase : Any ):
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
class __lowerCamelCase ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
_a = super()._format_usage(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_a = usage.replace('''<command> [<args>] ''' , '''''' )
        return usage
| 153 | 0 |
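# Readable sketch of the index-to-enum conversion pattern shared by the
# _convert_* helpers above; the option list mirrors the precision choices
# and is illustrative.
def convert_choice_sketch(value, options):
    index = int(value)  # the bullet menu returns the selected index as a string
    if not 0 <= index < len(options):
        raise ValueError(f"expected an index in [0, {len(options) - 1}], got {value!r}")
    return options[index]
assert convert_choice_sketch("1", ["no", "fp16", "bf16", "fp8"]) == "fp16"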
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : int=7 , _lowerCAmelCase : Dict=3 , _lowerCAmelCase : List[str]=1_8 , _lowerCAmelCase : str=3_0 , _lowerCAmelCase : int=4_0_0 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : int=None , _lowerCAmelCase : Any=True , _lowerCAmelCase : List[str]=[0.5, 0.5, 0.5] , _lowerCAmelCase : str=[0.5, 0.5, 0.5] , ) -> str:
"""simple docstring"""
snake_case_ = size if size is not None else {"height": 1_8, "width": 1_8}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = image_size
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_normalize
snake_case_ = image_mean
snake_case_ = image_std
def lowerCAmelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = DPTImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
snake_case_ = DPTImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "image_std" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(_lowerCAmelCase , "size" ) )
def lowerCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
def lowerCAmelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
snake_case_ = image_processing(_lowerCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
snake_case_ = image_processing(_lowerCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
# Initialize image_processing
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCAmelCase , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
snake_case_ = image_processing(_lowerCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
| 159 |
def _lowerCAmelCase ( lowerCAmelCase_ :Union[str, Any] , lowerCAmelCase_ :Tuple , lowerCAmelCase_ :Any )->List[Any]:
'''simple docstring'''
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(lowerCAmelCase_ , n - 1 , lowerCAmelCase_ ) * a) % mod
else:
        snake_case_ = binary_exponentiation(lowerCAmelCase_ , n // 2 , lowerCAmelCase_ )  # integer division keeps n an int
return (b * b) % mod
# a prime number
SCREAMING_SNAKE_CASE :List[str] = 7_01
SCREAMING_SNAKE_CASE :Optional[int] = 10_00_00_00_00
SCREAMING_SNAKE_CASE :Tuple = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 159 | 1 |
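# Readable sketch of the modular-division identity exercised above: for a
# prime p with gcd(b, p) == 1, Fermat's little theorem makes pow(b, p - 2, p)
# the modular inverse of b, so (a / b) % p equals (a * pow(b, p - 2, p)) % p
# whenever b divides a exactly.
def mod_divide_sketch(a, b, p):
    return (a * pow(b, p - 2, p)) % p
assert mod_divide_sketch(1_000_000_000, 10, 701) == (1_000_000_000 // 10) % 701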
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict=1 , lowerCAmelCase_ : Union[str, Any]=False , **lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
A__ : List[Any] =vocab_size
A__ : Tuple =d_embed
A__ : str =d_proj
A__ : List[Any] =cutoffs + [vocab_size]
A__ : List[Any] =[0] + self.cutoffs
A__ : int =div_val
A__ : List[str] =self.cutoffs[0]
A__ : Union[str, Any] =len(self.cutoffs ) - 1
A__ : List[str] =self.shortlist_size + self.n_clusters
A__ : List[Any] =keep_order
A__ : Optional[Any] =[]
A__ : int =[]
def lowercase__ ( self : int , lowerCAmelCase_ : str ) -> int:
'''simple docstring'''
if self.n_clusters > 0:
A__ : Union[str, Any] =self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name="""cluster_weight""" )
A__ : Optional[int] =self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A__ : List[Any] =self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=f"out_projs_._{i}" , )
self.out_projs.append(lowerCAmelCase_ )
else:
self.out_projs.append(lowerCAmelCase_ )
A__ : Optional[Any] =self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=f"out_layers_._{i}_._weight" , )
A__ : int =self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A__ , A__ : Tuple =self.cutoff_ends[i], self.cutoff_ends[i + 1]
A__ : List[str] =self.d_embed // (self.div_val**i)
A__ : Dict =self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=f"out_projs_._{i}" )
self.out_projs.append(lowerCAmelCase_ )
A__ : Union[str, Any] =self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=f"out_layers_._{i}_._weight" , )
A__ : str =self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=lowerCAmelCase_ , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase_ )
@staticmethod
def lowercase__ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : int=None ) -> List[Any]:
'''simple docstring'''
A__ : Dict =x
if proj is not None:
A__ : Union[str, Any] =tf.einsum("""ibd,ed->ibe""" , lowerCAmelCase_ , lowerCAmelCase_ )
return tf.einsum("""ibd,nd->ibn""" , lowerCAmelCase_ , lowerCAmelCase_ ) + b
@staticmethod
def lowercase__ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
A__ : List[Any] =shape_list(lowerCAmelCase_ )
A__ : Tuple =tf.range(lp_size[0] , dtype=target.dtype )
A__ : List[Any] =tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Dict=False ) -> List[Any]:
'''simple docstring'''
A__ : List[str] =0
if self.n_clusters == 0:
A__ : Optional[int] =self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A__ : Any =tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ )
A__ : Optional[int] =tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 )
else:
A__ : Tuple =shape_list(lowerCAmelCase_ )
A__ : str =[]
A__ : Dict =tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A__ , A__ : Dict =self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A__ : Tuple =(target >= l_idx) & (target < r_idx)
A__ : str =tf.where(lowerCAmelCase_ )
A__ : List[str] =tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx
if self.div_val == 1:
A__ : int =self.out_layers[0][0][l_idx:r_idx]
A__ : Dict =self.out_layers[0][1][l_idx:r_idx]
else:
A__ : int =self.out_layers[i][0]
A__ : Union[str, Any] =self.out_layers[i][1]
if i == 0:
A__ : Optional[Any] =tf.concat([cur_W, self.cluster_weight] , 0 )
A__ : List[Any] =tf.concat([cur_b, self.cluster_bias] , 0 )
A__ : Dict =self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] )
A__ : Tuple =tf.nn.log_softmax(lowerCAmelCase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A__ : List[str] =tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Any =self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
else:
A__ : Dict =self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] )
A__ : Optional[Any] =tf.nn.log_softmax(lowerCAmelCase_ )
A__ : Optional[Any] =self.cutoffs[0] + i - 1 # No probability for the head cluster
A__ : List[Any] =head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase_ )
if target is not None:
A__ : Union[str, Any] =tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Union[str, Any] =tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ )
A__ : List[Any] =self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) )
A__ : Optional[Any] =tf.concat(lowerCAmelCase_ , axis=-1 )
if target is not None:
if return_mean:
A__ : Dict =tf.reduce_mean(lowerCAmelCase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase_ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(lowerCAmelCase_ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
| 136 |
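# Sketch of how the cutoffs above partition the vocabulary: the head
# cluster holds the most frequent ids plus one routing logit per tail
# cluster, and a target id maps to a (cluster, relative index) pair.
# Names are illustrative, not the layer's API.
def locate_token_sketch(target, cutoffs):
    cutoff_ends = [0] + cutoffs  # cutoffs already ends with vocab_size
    for cluster, (l_idx, r_idx) in enumerate(zip(cutoff_ends[:-1], cutoff_ends[1:])):
        if l_idx <= target < r_idx:
            return cluster, target - l_idx
    raise ValueError(f"target {target} is outside the vocabulary")
assert locate_token_sketch(5, [4, 10]) == (1, 1)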
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCamelCase :
'''simple docstring'''
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
A__ : str =TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : int =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : Union[str, Any] =UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A__ : Dict =DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
A__ : Union[str, Any] =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
A__ : List[Any] =TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : str =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
A__ : Dict =UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
A__ : Optional[int] =DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCAmelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
A__ : int =DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
A__ : List[str] =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowercase__ ( self : str ) -> Tuple:
'''simple docstring'''
A__ : Tuple =self.get_dummy_components()
A__ : str =self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : Optional[int] =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : str =inputs["""prompt"""]
A__ : Optional[int] =inputs["""generator"""]
A__ : Optional[Any] =inputs["""num_inference_steps"""]
A__ : Union[str, Any] =inputs["""output_type"""]
if "image" in inputs:
A__ : Union[str, Any] =inputs["""image"""]
else:
A__ : List[Any] =None
if "mask_image" in inputs:
A__ : Union[str, Any] =inputs["""mask_image"""]
else:
A__ : Tuple =None
if "original_image" in inputs:
A__ : Optional[Any] =inputs["""original_image"""]
else:
A__ : Tuple =None
A__ , A__ : Optional[Any] =pipe.encode_prompt(lowerCAmelCase_ )
# inputs with prompt converted to embeddings
A__ : Optional[int] ={
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A__ : int =image
if mask_image is not None:
A__ : Tuple =mask_image
if original_image is not None:
A__ : Optional[int] =original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Any =pipe(**lowerCAmelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : int =self.pipeline_class.from_pretrained(lowerCAmelCase_ )
pipe_loaded.to(lowerCAmelCase_ )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(lowerCAmelCase_ , lowerCAmelCase_ ) is None , f"`{optional_component}` did not stay set to None after loading." , )
A__ : Dict =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : int =inputs["""generator"""]
A__ : str =inputs["""num_inference_steps"""]
A__ : Optional[Any] =inputs["""output_type"""]
# inputs with prompt converted to embeddings
A__ : List[Any] ={
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
A__ : int =image
if mask_image is not None:
A__ : int =mask_image
if original_image is not None:
A__ : Optional[int] =original_image
A__ : List[str] =pipe_loaded(**lowerCAmelCase_ )[0]
A__ : Union[str, Any] =np.abs(to_np(lowerCAmelCase_ ) - to_np(lowerCAmelCase_ ) ).max()
self.assertLess(lowerCAmelCase_ , 1e-4 )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ : Union[str, Any] =self.get_dummy_components()
A__ : int =self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
A__ : int =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : List[Any] =pipe(**lowerCAmelCase_ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(lowerCAmelCase_ )
A__ : List[Any] =self.pipeline_class.from_pretrained(lowerCAmelCase_ )
pipe_loaded.to(lowerCAmelCase_ )
pipe_loaded.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
A__ : int =self.get_dummy_inputs(lowerCAmelCase_ )
A__ : Tuple =pipe_loaded(**lowerCAmelCase_ )[0]
A__ : Tuple =np.abs(to_np(lowerCAmelCase_ ) - to_np(lowerCAmelCase_ ) ).max()
self.assertLess(lowerCAmelCase_ , 1e-4 )
| 136 | 1 |
from __future__ import annotations
UpperCAmelCase__ = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase__ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase__ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _a ( a :Matrix , a :int , a :int , a :int ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _a ( a :Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _a ( a :Matrix ) -> Matrix | None:
if location := find_empty_location(a ):
a , a = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(a , a , a , a ):
a = digit
if sudoku(a ) is not None:
return grid
a = 0
return None
def _a ( a :Matrix ) -> None:
for row in grid:
for cell in row:
print(a , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
UpperCAmelCase__ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 0 |
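# Quick check of the box arithmetic inside is_safe above:
# (row - row % 3, column - column % 3) is the top-left corner of the
# 3x3 box that contains (row, column).
assert (7 - 7 % 3, 5 - 5 % 3) == (6, 3)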
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def lowercase ( A_ )-> Dict:
'''simple docstring'''
a : str = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
a : Union[str, Any] = 128
elif "12-12" in model_name:
a : List[Any] = 12
a : str = 12
elif "14-14" in model_name:
a : List[Any] = 14
a : Optional[int] = 14
elif "16-16" in model_name:
a : Any = 16
a : List[Any] = 16
else:
raise ValueError("Model not supported" )
a : Optional[int] = "huggingface/label-files"
if "speech-commands" in model_name:
a : Optional[int] = 35
a : List[str] = "speech-commands-v2-id2label.json"
else:
a : Optional[Any] = 527
a : Tuple = "audioset-id2label.json"
a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) )
a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()}
a : Any = idalabel
a : str = {v: k for k, v in idalabel.items()}
return config
def lowercase ( A_ )-> Tuple:
'''simple docstring'''
if "module.v" in name:
a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
a : str = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
a : Union[str, Any] = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
a : str = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
a : Tuple = name.replace("attn" , "attention.self" )
if "norm1" in name:
a : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
a : Union[str, Any] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def lowercase ( A_ , A_ )-> Any:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
a : str = orig_state_dict.pop(A_ )
if "qkv" in key:
a : int = key.split("." )
a : Optional[int] = int(key_split[3] )
a : int = config.hidden_size
if "weight" in key:
a : List[str] = val[:dim, :]
a : Any = val[dim : dim * 2, :]
a : int = val[-dim:, :]
else:
a : Optional[Any] = val[:dim]
a : Union[str, Any] = val[dim : dim * 2]
a : str = val[-dim:]
else:
a : str = val
return orig_state_dict
def lowercase ( A_ )-> Dict:
'''simple docstring'''
a : Union[str, Any] = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(A_ , A_ )
@torch.no_grad()
def lowercase ( A_ , A_ , A_=False )-> Optional[int]:
'''simple docstring'''
a : Optional[int] = get_audio_spectrogram_transformer_config(A_ )
a : Dict = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
a : Any = model_name_to_url[model_name]
a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" )
# remove some keys
remove_keys(A_ )
# rename some keys
a : Union[str, Any] = convert_state_dict(A_ , A_ )
# load 🤗 model
a : List[str] = ASTForAudioClassification(A_ )
model.eval()
model.load_state_dict(A_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8
a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6
a : str = 1_024 if "speech-commands" not in model_name else 128
a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ )
if "speech-commands" in model_name:
a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" )
a : int = dataset[0]["audio"]["array"]
else:
a : Tuple = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
a , a : Tuple = torchaudio.load(A_ )
a : Optional[Any] = waveform.squeeze().numpy()
a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" )
# forward pass
a : Optional[Any] = model(**A_ )
a : List[str] = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
elif model_name == "ast-finetuned-speech-commands-v2":
a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(A_ ).mkdir(exist_ok=A_ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(A_ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowercase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 40 | 0 |
def _UpperCAmelCase (UpperCamelCase_ : list[int] ):
'''simple docstring'''
if not numbers:
return 0
if not isinstance(UpperCAmelCase__ , (list, tuple) ) or not all(
isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
    _lowerCAmelCase : List[str] = _lowerCAmelCase = _lowerCAmelCase = numbers[0]  # max_till_now = min_till_now = max_prod = numbers[0]
for i in range(1 , len(UpperCAmelCase__ ) ):
# update the maximum and minimum subarray products
_lowerCAmelCase : Tuple = numbers[i]
if number < 0:
            _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = min_till_now, max_till_now  # swap max and min before multiplying by a negative
_lowerCAmelCase : Union[str, Any] = max(UpperCAmelCase__ , max_till_now * number )
_lowerCAmelCase : List[Any] = min(UpperCAmelCase__ , min_till_now * number )
# update the maximum product found till now
_lowerCAmelCase : List[str] = max(UpperCAmelCase__ , UpperCAmelCase__ )
return max_prod
| 371 |
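# Readable sketch of the recurrence above: keep running maximum and
# minimum products, swapping them on a negative element so a large
# negative minimum can flip into the new maximum.
def max_product_subarray_sketch(numbers):
    if not numbers:
        return 0
    max_till_now = min_till_now = max_prod = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        max_prod = max(max_prod, max_till_now)
    return max_prod
assert max_product_subarray_sketch([2, 3, -2, 4]) == 6
assert max_product_subarray_sketch([-4, -3, -2]) == 12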
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class __snake_case (_a ):
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = SMALL_MODEL_IDENTIFIER
_lowerCAmelCase : str = """pt"""
_lowerCAmelCase : List[Any] = """tf"""
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : int = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFAutoModel.from_pretrained(self.test_model , from_pt=_UpperCAmelCase )
model_tf.save_pretrained(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : int = """mock_framework"""
# Framework provided - return whatever the user provides
_lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(self.test_model , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = FeaturesManager.determine_framework(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = FeaturesManager.determine_framework(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_UpperCAmelCase )
_lowerCAmelCase : str = FeaturesManager.determine_framework(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_UpperCAmelCase )
_lowerCAmelCase : Tuple = FeaturesManager.determine_framework(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_UpperCAmelCase ):
_lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : List[str] = MagicMock(return_value=_UpperCAmelCase )
with patch("""transformers.onnx.features.is_tf_available""" , _UpperCAmelCase ):
_lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_UpperCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_lowerCAmelCase : str = MagicMock(return_value=_UpperCAmelCase )
with patch("""transformers.onnx.features.is_torch_available""" , _UpperCAmelCase ):
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_UpperCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
_lowerCAmelCase : List[Any] = MagicMock(return_value=_UpperCAmelCase )
_lowerCAmelCase : Tuple = MagicMock(return_value=_UpperCAmelCase )
with patch("""transformers.onnx.features.is_tf_available""" , _UpperCAmelCase ), patch(
"""transformers.onnx.features.is_torch_available""" , _UpperCAmelCase ):
_lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_UpperCAmelCase , self.framework_pt )
# Both not in environment -> raise error
_lowerCAmelCase : List[str] = MagicMock(return_value=_UpperCAmelCase )
_lowerCAmelCase : List[Any] = MagicMock(return_value=_UpperCAmelCase )
with patch("""transformers.onnx.features.is_tf_available""" , _UpperCAmelCase ), patch(
"""transformers.onnx.features.is_torch_available""" , _UpperCAmelCase ):
with self.assertRaises(_UpperCAmelCase ):
_lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model )
| 159 | 0 |
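# Sketch of the resolution order the tests above pin down: an explicit
# framework wins, then the framework of a local checkpoint, then
# availability with PyTorch preferred over TensorFlow, else an error.
# This is a stand-in for the behavior under test, not the real API.
def determine_framework_sketch(explicit, local, has_torch, has_tf):
    if explicit is not None:
        return explicit
    if local is not None:
        return local
    if has_torch:
        return "pt"
    if has_tf:
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow is available.")
assert determine_framework_sketch(None, None, True, True) == "pt"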
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_UpperCAmelCase : Dict = """CompVis/stable-diffusion-v1-1"""
_UpperCAmelCase : Tuple = """CompVis/stable-diffusion-v1-2"""
_UpperCAmelCase : Dict = """CompVis/stable-diffusion-v1-3"""
_UpperCAmelCase : Union[str, Any] = """CompVis/stable-diffusion-v1-4"""
class lowercase ( lowercase_ ):
def __init__( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = True , ):
        super().__init__()
snake_case_ = StableDiffusionPipeline.from_pretrained(snake_case )
snake_case_ = StableDiffusionPipeline.from_pretrained(snake_case )
snake_case_ = StableDiffusionPipeline.from_pretrained(snake_case )
snake_case_ = StableDiffusionPipeline(
vae=snake_case , text_encoder=snake_case , tokenizer=snake_case , unet=snake_case , scheduler=snake_case , safety_checker=snake_case , feature_extractor=snake_case , requires_safety_checker=snake_case , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def a ( self ):
return {k: getattr(self , snake_case ) for k in self.config.keys() if not k.startswith('_' )}
def a ( self , snake_case = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case )
def a ( self ):
self.enable_attention_slicing(snake_case )
@torch.no_grad()
def a ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ):
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def a ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ):
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def a ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ):
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def a ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ):
return self.pipea(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
@torch.no_grad()
def a ( self , snake_case , snake_case = 512 , snake_case = 512 , snake_case = 50 , snake_case = 7.5 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = None , snake_case = None , snake_case = "pil" , snake_case = True , snake_case = None , snake_case = 1 , **snake_case , ):
snake_case_ = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(snake_case )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
snake_case_ = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.2
snake_case_ = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.3
snake_case_ = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get first result from Stable Diffusion Checkpoint v1.4
snake_case_ = self.textaimg_sda_a(
prompt=snake_case , height=snake_case , width=snake_case , num_inference_steps=snake_case , guidance_scale=snake_case , negative_prompt=snake_case , num_images_per_prompt=snake_case , eta=snake_case , generator=snake_case , latents=snake_case , output_type=snake_case , return_dict=snake_case , callback=snake_case , callback_steps=snake_case , **snake_case , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 285 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase ( lowercase_ ):
@staticmethod
@abstractmethod
def a ( snake_case ):
raise NotImplementedError()
@abstractmethod
def a ( self ):
raise NotImplementedError()
| 285 | 1 |
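# Hypothetical concrete command showing the intended contract of the
# abstract base above: register_subcommand receives an argparse
# subparsers action and wires a new subcommand, run executes it.
class HelloCommandSketch:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("hello", help="Print a greeting.")
        parser.add_argument("--name", default="world")
        parser.set_defaults(factory=lambda args: HelloCommandSketch(args.name))
    def __init__(self, name):
        self.name = name
    def run(self):
        print(f"hello, {self.name}")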
from itertools import product
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
"""simple docstring"""
a :str = sides_number
a :Any = max_face_number * dice_number
a :str = [0] * (max_total + 1)
a :Union[str, Any] = 1
a :Tuple = range(UpperCAmelCase_ , max_face_number + 1 )
for dice_numbers in product(UpperCAmelCase_ , repeat=UpperCAmelCase_ ):
a :Any = sum(UpperCAmelCase_ )
totals_frequencies[total] += 1
return totals_frequencies
def __lowerCamelCase ( ):
"""simple docstring"""
a :Union[str, Any] = total_frequency_distribution(
sides_number=4 , dice_number=9 )
a :Union[str, Any] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
a :Union[str, Any] = 0
a :Any = 9
a :List[Any] = 4 * 9
a :Optional[int] = 6
for peter_total in range(UpperCAmelCase_ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
a :str = (4**9) * (6**6)
a :Optional[Any] = peter_wins_count / total_games_number
a :Tuple = round(UpperCAmelCase_ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 281 |
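# Sanity sketch of the frequency table built above: two four-sided dice
# give the triangular distribution 1, 2, 3, 4, 3, 2, 1 over totals 2..8.
from collections import Counter
from itertools import product
counts = Counter(sum(roll) for roll in product(range(1, 5), repeat=2))
assert [counts[total] for total in range(2, 9)] == [1, 2, 3, 4, 3, 2, 1]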
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class A__ ( __snake_case ):
def __init__( self , *A_ , A_=None , A_=None , **A_ ):
'''simple docstring'''
super().__init__(*A_ , **A_ )
UpperCamelCase : str = eval_examples
UpperCamelCase : int = post_process_function
def __UpperCamelCase( self , A_=None , A_=None , A_=None , A_ = "eval" ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCamelCase : str = self.get_eval_dataloader(A_ )
UpperCamelCase : List[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCamelCase : Dict = self.compute_metrics
UpperCamelCase : Dict = None
UpperCamelCase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCamelCase : List[str] = time.time()
try:
UpperCamelCase : Optional[int] = eval_loop(
A_ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , metric_key_prefix=A_ , )
finally:
UpperCamelCase : Optional[Any] = compute_metrics
UpperCamelCase : Tuple = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
A_ , A_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCamelCase : Tuple = self.post_process_function(A_ , A_ , output.predictions )
UpperCamelCase : Tuple = self.compute_metrics(A_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
UpperCamelCase : Tuple = metrics.pop(A_ )
metrics.update(output.metrics )
else:
UpperCamelCase : Union[str, Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(A_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCamelCase : Tuple = self.callback_handler.on_evaluate(self.args , self.state , self.control , A_ )
return metrics
def __UpperCamelCase( self , A_ , A_ , A_=None , A_ = "test" ):
'''simple docstring'''
UpperCamelCase : Any = self.get_test_dataloader(A_ )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCamelCase : Any = self.compute_metrics
UpperCamelCase : List[Any] = None
UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCamelCase : Tuple = time.time()
try:
UpperCamelCase : List[Any] = eval_loop(
A_ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , metric_key_prefix=A_ , )
finally:
UpperCamelCase : List[str] = compute_metrics
UpperCamelCase : Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
A_ , A_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCamelCase : List[str] = self.post_process_function(A_ , A_ , output.predictions , "predict" )
UpperCamelCase : List[str] = self.compute_metrics(A_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
UpperCamelCase : str = metrics.pop(A_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A_ )
| 52 |
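# Minimal sketch of the metric-key prefixing both loops above perform
# before logging; names are illustrative.
def prefix_metrics_sketch(metrics, prefix="eval"):
    return {
        key if key.startswith(f"{prefix}_") else f"{prefix}_{key}": value
        for key, value in metrics.items()
    }
assert prefix_metrics_sketch({"f1": 88.0, "eval_loss": 0.4}) == {"eval_f1": 88.0, "eval_loss": 0.4}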
"""simple docstring"""
class _lowerCamelCase :
def __init__(self , __a ) -> None:
UpperCamelCase = len(__a )
UpperCamelCase = [0] * len_array
if len_array > 0:
UpperCamelCase = array[0]
for i in range(1 , __a ):
UpperCamelCase = self.prefix_sum[i - 1] + array[i]
def snake_case_ (self , __a , __a ) -> int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def snake_case_ (self , __a ) -> bool:
UpperCamelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__a )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 153 | 0 |
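# Readable sketch of the subarray-sum query above: keep a set of prefix
# sums seen so far; a contiguous subarray summing to target exists iff
# some running prefix minus target was seen before.
def contains_sum_sketch(array, target):
    sums, running = {0}, 0
    for value in array:
        running += value
        if running - target in sums:
            return True
        sums.add(running)
    return False
assert contains_sum_sketch([1, 2, 3, 4], 5) is True  # 2 + 3
assert contains_sum_sketch([1, 2, 3, 4], 11) is False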
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : str ):
"""simple docstring"""
return "".join(chr(ord(lowerCAmelCase ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 275 |
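# Quick check of the ASCII arithmetic above: ord("a") - ord("A") == 32,
# so subtracting 32 uppercases a lowercase ASCII letter.
assert chr(ord("a") - 32) == "A"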
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
__magic_name__ : int = f'Input value of [number={number}] must be an integer'
raise TypeError(lowerCAmelCase )
if number < 0:
return False
__magic_name__ : Tuple = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 275 | 1 |
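# The digit loop above tests whether number**2 ends in number's own
# digits (an automorphic number); a string-based sketch of the same test:
def is_automorphic_sketch(number):
    return number >= 0 and str(number * number).endswith(str(number))
assert is_automorphic_sketch(76) and is_automorphic_sketch(25)
assert not is_automorphic_sketch(7)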
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
lowerCamelCase :Union[str, Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
lowerCamelCase :Tuple = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy ( preds , labels ):
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_f1 ( preds , labels ):
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman ( preds , labels ):
    '''simple docstring'''
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
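A small self-contained check of the helpers above; numpy arrays are assumed, since simple_accuracy relies on elementwise comparison:

import numpy as np

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
print(simple_accuracy(preds, labels))  # 0.75
print(acc_and_f1(preds, labels))       # {'accuracy': 0.75, 'f1': 0.666...}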
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
    def _info (self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute (self , predictions , references ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                """You should supply a configuration name selected in """
                """[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
                """\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" ) | 206 | """simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp ( self ):
        super().setUp()
        tokenizer = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def perceiver_tokenizer ( self ):
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
    def get_tokenizer ( self , **kwargs ):
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence ( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$" , t[1] ) , toks ) )
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Tuple = self.perceiver_tokenizer
__lowerCamelCase : Optional[int] = "Unicode €."
__lowerCamelCase : Union[str, Any] = tokenizer(UpperCAmelCase )
__lowerCamelCase : List[Any] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] , UpperCAmelCase )
# decoding
__lowerCamelCase : List[Any] = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , "[CLS]Unicode €.[SEP]" )
__lowerCamelCase : Optional[Any] = tokenizer("e è é ê ë" )
__lowerCamelCase : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , UpperCAmelCase )
# decoding
__lowerCamelCase : Optional[Any] = tokenizer.decode(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Dict = self.perceiver_tokenizer
__lowerCamelCase : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
__lowerCamelCase : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__lowerCamelCase : Dict = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
if FRAMEWORK != "jax":
__lowerCamelCase : Union[str, Any] = list(batch.input_ids.numpy()[0] )
else:
__lowerCamelCase : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : str = self.perceiver_tokenizer
__lowerCamelCase : Optional[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__lowerCamelCase : str = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors=UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , UpperCAmelCase )
self.assertIn("attention_mask" , UpperCAmelCase )
self.assertNotIn("decoder_input_ids" , UpperCAmelCase )
self.assertNotIn("decoder_attention_mask" , UpperCAmelCase )
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : str = self.perceiver_tokenizer
__lowerCamelCase : Union[str, Any] = [
"Summary of the text.",
"Another summary.",
]
__lowerCamelCase : int = tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding="max_length" , truncation=UpperCAmelCase , return_tensors=UpperCAmelCase )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCamelCase__ ( self : str ):
# safety check on max_len default value so we are sure the test works
__lowerCamelCase : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__lowerCamelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCamelCase : int = tempfile.mkdtemp()
__lowerCamelCase : Optional[int] = " He is very happy, UNwant\u00E9d,running"
__lowerCamelCase : Any = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
__lowerCamelCase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCAmelCase )
__lowerCamelCase : Tuple = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
__lowerCamelCase : int = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCamelCase : List[Any] = tempfile.mkdtemp()
__lowerCamelCase : Dict = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
__lowerCamelCase : List[str] = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__lowerCamelCase : Tuple = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
__lowerCamelCase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCAmelCase )
__lowerCamelCase : Optional[int] = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__lowerCamelCase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__lowerCamelCase : Optional[int] = json.load(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__lowerCamelCase : Any = json.load(UpperCAmelCase )
__lowerCamelCase : Tuple = [F"""<extra_id_{i}>""" for i in range(125 )]
__lowerCamelCase : Dict = added_tokens_extra_ids + [
"an_additional_special_token"
]
__lowerCamelCase : Any = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(UpperCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowerCamelCase : Tuple = tokenizer_class.from_pretrained(
UpperCAmelCase , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowerCamelCase : List[Any] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=UpperCAmelCase )]
__lowerCamelCase : Tuple = tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : str = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def lowerCamelCase__ ( self : List[str] ):
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : Optional[int] ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__lowerCamelCase : List[str] = self.get_tokenizers(fast=UpperCAmelCase , do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__lowerCamelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
__lowerCamelCase : Any = tokenizer.convert_tokens_to_string(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) | 135 | 0 |
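As a minimal sketch of the byte-level scheme behind the expected IDs in the tests above: PerceiverTokenizer encodes text as UTF-8 bytes shifted by an offset that reserves low IDs for special tokens. The offset of 6 below is inferred from the test values ('U' is byte 85 and shows up as ID 91); the helper is an illustration, not the library implementation.

def byte_ids(text: str, offset: int = 6) -> list[int]:
    # UTF-8 bytes shifted past the special-token IDs
    return [b + offset for b in text.encode("utf-8")]

print(byte_ids("Unicode"))  # [91, 116, 111, 105, 117, 106, 107] -- the IDs above after [CLS] = 4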
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests ( OnnxPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs ( self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : List[Any] =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Any =self.get_dummy_inputs()
UpperCamelCase__ : str =pipe(**lowercase_ ).images
UpperCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : Tuple =np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _lowerCAmelCase ( self : Tuple ):
UpperCamelCase__ : str =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCamelCase__ : Dict =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : str =self.get_dummy_inputs()
UpperCamelCase__ : Tuple =pipe(**lowercase_ ).images
UpperCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : Optional[Any] =np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCAmelCase ( self : Union[str, Any] ):
UpperCamelCase__ : int =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCamelCase__ : Union[str, Any] =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.get_dummy_inputs()
UpperCamelCase__ : Optional[Any] =pipe(**lowercase_ ).images
UpperCamelCase__ : List[str] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : Optional[int] =np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ : int =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCamelCase__ : Optional[int] =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : str =self.get_dummy_inputs()
UpperCamelCase__ : Dict =pipe(**lowercase_ ).images
UpperCamelCase__ : Optional[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : List[str] =np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : Optional[Any] =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
UpperCamelCase__ : Union[str, Any] =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : Optional[Any] =self.get_dummy_inputs()
UpperCamelCase__ : Tuple =pipe(**lowercase_ ).images
UpperCamelCase__ : Dict =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : str =np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __a ( unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options ( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : Optional[int] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCamelCase__ : Optional[Any] =init_image.resize((128, 128) )
# using the PNDM scheduler by default
UpperCamelCase__ : Any =OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : int ='''A fantasy landscape, trending on artstation'''
UpperCamelCase__ : List[Any] =torch.manual_seed(0 )
UpperCamelCase__ : Optional[int] =pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase_ , output_type='''np''' , )
UpperCamelCase__ : Optional[Any] =output.images
UpperCamelCase__ : Union[str, Any] =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase__ : Union[str, Any] =np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
UpperCamelCase__ : List[str] =init_image.resize((128, 128) )
UpperCamelCase__ : Tuple =LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
UpperCamelCase__ : Tuple =OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowercase_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : str ='''A fantasy landscape, trending on artstation'''
UpperCamelCase__ : Optional[Any] =torch.manual_seed(0 )
UpperCamelCase__ : Tuple =pipe(
prompt=lowercase_ , image=lowercase_ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase_ , output_type='''np''' , )
UpperCamelCase__ : Dict =output.images
UpperCamelCase__ : Union[str, Any] =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase__ : str =np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 157 |
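The tests above repeat one comparison pattern: slice a 3x3 corner of the generated image and compare it against hard-coded reference values. A standalone sketch of that pattern (the helper name and tolerance are illustrative):

import numpy as np

def assert_slice_close(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-1) -> None:
    # compare a 3x3 corner patch of the last channel, as the tests above do
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert np.abs(image_slice - expected_slice).max() < atol

assert_slice_close(np.zeros((1, 512, 512, 3)), np.zeros(9))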
"""simple docstring"""
from __future__ import annotations
def simple_interest ( principal : float , daily_interest_rate : float , days_between_payments : float ) -> float:
    '''simple docstring'''
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''' )
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * daily_interest_rate * days_between_payments
def compound_interest ( principal : float , nominal_annual_interest_rate_percentage : float , number_of_compounding_periods : float , ) -> float:
    '''simple docstring'''
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''' )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest ( principal : float , nominal_annual_percentage_rate : float , number_of_years : float , ) -> float:
    '''simple docstring'''
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''' )
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
    if principal <= 0:
        raise ValueError('''principal must be > 0''' )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 157 | 1 |
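Worked values for the interest helpers above:

print(simple_interest(18_000.0, 0.06, 3))         # 3240.0 == principal * rate * periods
print(compound_interest(10_000.0, 0.05, 3))       # ~1576.25 == 10000 * (1.05 ** 3 - 1)
print(round(apr_interest(10_000.0, 0.05, 3), 2))  # ~1618.23, daily compounding of 5% APR over 3 years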
def z_function ( input_str : str ):
    """Compute the Z-array of ``input_str``: for each i, the length of the longest
    common prefix of the string and its suffix starting at i."""
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer , right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next ( i : int , z_result : list[int] , s : str ):
    """simple docstring"""
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern ( pattern : str , input_str : str ):
    """Count occurrences of ``pattern`` in ``input_str`` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
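A worked example for the Z-function above; the expected arrays were traced by hand:

print(z_function("aabxaayaab"))           # [0, 1, 0, 0, 2, 1, 0, 3, 1, 0]
print(find_pattern("aab", "aabxaayaab"))  # 2 -- "aab" occurs at indices 0 and 7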
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput ( BaseOutput ):
    """simple docstring"""
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding ( nn.Module ):
    """simple docstring"""
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup ( self ) -> None:
        """simple docstring"""
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv_a = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv_a )
            conv_b = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv_b )
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , conditioning ):
        """simple docstring"""
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
@flax_register_to_config
class FlaxControlNetModel ( nn.Module , FlaxModelMixin , ConfigMixin ):
    """simple docstring"""
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights ( self , rng : jax.random.KeyArray ) -> FrozenDict:
        """simple docstring"""
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
def lowerCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
snake_case_ = self.block_out_channels
snake_case_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case_ = self.num_attention_heads or self.attention_head_dim
# input
snake_case_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case_ = FlaxTimestepEmbedding(_lowerCAmelCase , dtype=self.dtype )
snake_case_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
snake_case_ = self.only_cross_attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case_ = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case_ = []
snake_case_ = []
snake_case_ = block_out_channels[0]
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
for i, down_block_type in enumerate(self.down_block_types ):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
snake_case_ = i == len(_lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case_ = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
snake_case_ = FlaxDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCAmelCase )
for _ in range(self.layers_per_block ):
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
if not is_final_block:
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowerCAmelCase )
snake_case_ = down_blocks
snake_case_ = controlnet_down_blocks
# mid
snake_case_ = block_out_channels[-1]
snake_case_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=_lowerCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
snake_case_ = nn.Conv(
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : float = 1.0 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
"""simple docstring"""
snake_case_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
snake_case_ = jnp.flip(_lowerCAmelCase , axis=1 )
# 1. time
if not isinstance(_lowerCAmelCase , jnp.ndarray ):
snake_case_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case_ = timesteps.astype(dtype=jnp.floataa )
snake_case_ = jnp.expand_dims(_lowerCAmelCase , 0 )
snake_case_ = self.time_proj(_lowerCAmelCase )
snake_case_ = self.time_embedding(_lowerCAmelCase )
# 2. pre-process
snake_case_ = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
snake_case_ = self.conv_in(_lowerCAmelCase )
snake_case_ = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
snake_case_ = self.controlnet_cond_embedding(_lowerCAmelCase )
sample += controlnet_cond
# 3. down
snake_case_ = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
snake_case_ , snake_case_ = down_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
else:
snake_case_ , snake_case_ = down_block(_lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
snake_case_ = self.mid_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
        # 5. controlnet blocks
snake_case_ = ()
for down_block_res_sample, controlnet_block in zip(_lowerCAmelCase , self.controlnet_down_blocks ):
snake_case_ = controlnet_block(_lowerCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
snake_case_ = controlnet_down_block_res_samples
snake_case_ = self.controlnet_mid_block(_lowerCAmelCase )
# 6. scaling
snake_case_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_lowerCAmelCase , mid_block_res_sample=_lowerCAmelCase )
| 159 | 0 |
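The final scaling step of the __call__ above multiplies every ControlNet residual by conditioning_scale before the residuals are handed to the UNet. Isolated as a toy example (the shapes are arbitrary):

import jax.numpy as jnp

down_block_res_samples = (jnp.ones((1, 8, 8, 4)), jnp.ones((1, 4, 4, 8)))
mid_block_res_sample = jnp.ones((1, 2, 2, 16))
conditioning_scale = 0.5
down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample = mid_block_res_sample * conditioning_scale
print(float(mid_block_res_sample.max()))  # 0.5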
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=5_12,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool ( string : str ):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F'could not parse string as bool {string}' )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 109 |
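parse_bool above is a strict converter used for the --use_linear_projection flag; anything other than the literal strings "True" and "False" raises:

print(parse_bool("True"), parse_bool("False"))  # True False
try:
    parse_bool("yes")
except ValueError as err:
    print(err)  # could not parse string as bool yes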
from __future__ import annotations
class Node :
    def __init__(self , data : int ) -> None:
        self.data = data
        self.left : Node | None = None
        self.right : Node | None = None
def display ( tree : Node | None ):  # In Order traversal of the tree
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree ( tree : Node | None ):
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree ( tree : Node ):
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main ( ):  # Main function for testing.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('Tree is: ' )
    display(tree )
if __name__ == "__main__":
main()
| 109 | 1 |
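A quick illustration of the is_full_binary_tree predicate above: every node must have either zero or two children.

root = Node(1)
root.left = Node(2)
print(is_full_binary_tree(root))  # False: node 1 has exactly one child
root.right = Node(3)
print(is_full_binary_tree(root))  # True
print(depth_of_tree(root))        # 2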
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch ( bort_checkpoint_path : str , pytorch_dump_folder_path : str ):
    '''simple docstring'''
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1E-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=False , output_all_encodings=False , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__magic_name__ : List[str] = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , "models" )
    bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(bort_vocab ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(_snake_case ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
    for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , F'''encoder.transformer_cells.{i}.proj.bias''' )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , F'''encoder.transformer_cells.{i}.proj.weight''' )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base" )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )["input_ids"]
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors="pt" )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
    if success:
        print("✔️ Both models output the same tensors" )
    else:
        print("❌ Both models do **NOT** output the same tensors" )
        print("Absolute difference is:" , max_absolute_diff )
if __name__ == "__main__":
snake_case : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
snake_case : int = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 281 |
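The conversion above leans on one recurring safeguard: verify that the source array and the target PyTorch parameter agree on shape before copying. A standalone toy version of that idea (the helper name is illustrative):

import numpy as np
import torch
from torch import nn

def copy_param(hf_param: nn.Parameter, source: np.ndarray, name: str) -> nn.Parameter:
    # refuse to copy silently mismatched weights, mirroring check_and_map_params above
    assert tuple(hf_param.shape) == source.shape, f"shape mismatch for {name}"
    return nn.Parameter(torch.from_numpy(source).float())

p = copy_param(nn.Parameter(torch.zeros(2, 3)), np.ones((2, 3), dtype=np.float32), "demo")
print(p.shape)  # torch.Size([2, 3])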
from __future__ import annotations
class Node :
    def __init__( self , data ):
        self.data = data
        self.left : Node | None = None
        self.right : Node | None = None
def display ( tree : Node | None ) -> None:  # In Order traversal of the tree
    '''simple docstring'''
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree ( tree : Node | None ) -> int:
    '''simple docstring'''
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree ( tree : Node ) -> bool:
    '''simple docstring'''
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main ( ) -> None:  # Main function for testing.
    '''simple docstring'''
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("Tree is: " )
    display(tree )
if __name__ == "__main__":
main()
| 281 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig ( BertConfig ):
    model_type = "new-model"
if is_tf_available():
    class TFNewModel ( TFBertModel ):
        config_class = NewModelConfig
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = "bert-base-cased"
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[str] = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Tuple = "bert-base-cased"
_UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Tuple = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Any = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : str = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Dict = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Dict = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : List[Any] ) ->Dict:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : List[str] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : str = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : str ) ->Optional[int]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase : int = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : str = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
@require_tensorflow_probability
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
_UpperCAmelCase : List[str] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Dict = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) , 1_44_10 )
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) , 1_44_10 )
def lowerCAmelCase__ ( self : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : str = copy.deepcopy(model.config )
_UpperCAmelCase : List[Any] = ["FunnelBaseModel"]
_UpperCAmelCase : Union[str, Any] = TFAutoModel.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = TFAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
try:
AutoConfig.register("new-model" , lowerCamelCase__ )
_UpperCAmelCase : List[str] = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
auto_class.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
_UpperCAmelCase : Tuple = BertModelTester(self ).get_config()
_UpperCAmelCase : Union[str, Any] = NewModelConfig(**tiny_config.to_dict() )
_UpperCAmelCase : Tuple = auto_class.from_config(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ )
_UpperCAmelCase : int = auto_class.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ , "bert-base is not a local folder and is not a valid model identifier" ):
_UpperCAmelCase : Any = TFAutoModel.from_pretrained("bert-base" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Any:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_UpperCAmelCase : Union[str, Any] = TFAutoModel.from_pretrained(lowerCamelCase__ , revision="aaaaaa" )
def lowerCAmelCase__ ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
_UpperCAmelCase : Optional[int] = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def lowerCAmelCase__ ( self : int ) ->int:
'''simple docstring'''
with self.assertRaisesRegex(lowerCamelCase__ , "Use `from_pt=True` to load this model" ):
_UpperCAmelCase : str = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
_UpperCAmelCase : Tuple = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
_UpperCAmelCase : str = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
with RequestCounter() as counter:
_UpperCAmelCase : str = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
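# Editor's note (hedged recap of the registration pattern these tests exercise;
# NewModelConfig / TFNewModel are the test's own stand-ins, not public classes):
#     AutoConfig.register("new-model", NewModelConfig)
#     TFAutoModel.register(NewModelConfig, TFNewModel)
#     model = TFAutoModel.from_config(NewModelConfig())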
| 322 |
'''simple docstring'''
from manim import *
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Dict = Rectangle(height=0.5 , width=0.5 )
_UpperCAmelCase : Optional[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Dict = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[Any] = VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : int = Text("CPU" , font_size=24 )
_UpperCAmelCase : Any = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = [mem.copy() for i in range(1 )]
_UpperCAmelCase : str = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : int = Text("GPU" , font_size=24 )
_UpperCAmelCase : str = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.align_to(lowerCamelCase__ , lowerCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [mem.copy() for i in range(6 )]
_UpperCAmelCase : Any = VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
_UpperCAmelCase : Optional[int] = Text("Model" , font_size=24 )
_UpperCAmelCase : Tuple = Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) , )
_UpperCAmelCase : int = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
_UpperCAmelCase : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCAmelCase : Union[str, Any] = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ , run_time=2.5 ) , Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.add(lowerCamelCase__ )
_UpperCAmelCase : int = []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = []
for i, rect in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : int = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
cpu_target.move_to(lowerCamelCase__ )
cpu_target.generate_target()
_UpperCAmelCase : Dict = 0.4_6 / 4
_UpperCAmelCase : Any = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase__ , buff=0.0 )
cpu_targs.append(lowerCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase__ ) )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
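# Editor's note (assumed invocation, not part of the file): a Scene like this is
# typically rendered from the command line, e.g.
#     manim -pql this_file.py <SceneClassName>
# where -p previews the output and -ql picks low quality for a fast render.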
| 322 | 1 |
def factorial(digit):
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number):
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
_UpperCamelCase = int(input("Enter number: ").strip())
print(
F"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
)
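# Editor-added checks: 145 = 1! + 4! + 5! and 40585 are the classic
# Krishnamurthy numbers, while 10 is not one.
assert krishnamurthy(145) and krishnamurthy(40585)
assert not krishnamurthy(10)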
| 275 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError('''Impossible fluid density''')
    if bulk_modulus <= 0:
        raise ValueError('''Impossible bulk modulus''')
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
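# Editor-added illustration (rounded textbook constants for water, so the
# result is only approximate):
water_speed = speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9)
assert 1400 < water_speed < 1550  # roughly 1468 m/s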
| 275 | 1 |
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 343 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
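# Editor-added sanity check on a tiny, assumed input list:
sample = [3, 1, 2, 5, 4]
_in_place_quick_sort(sample, 0, len(sample) - 1)
assert sample == [1, 2, 3, 4, 5]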
| 343 | 1 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class _snake_case :
def __init__( self: Tuple , __lowerCamelCase: Tuple=None , **__lowerCamelCase: Union[str, Any] ) -> Dict:
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
__UpperCAmelCase : Union[str, Any] = model
__UpperCAmelCase : Optional[Any] = kwargs.get("model_save_dir" , __lowerCamelCase )
__UpperCAmelCase : str = kwargs.get("latest_model_name" , __lowerCamelCase )
def __call__( self: int , **__lowerCamelCase: Optional[Any] ) -> int:
__UpperCAmelCase : Optional[Any] = {k: np.array(__lowerCamelCase ) for k, v in kwargs.items()}
return self.model.run(__lowerCamelCase , __lowerCamelCase )
@staticmethod
def _lowerCamelCase ( __lowerCamelCase: Union[str, Path] , __lowerCamelCase: Union[str, Any]=None , __lowerCamelCase: Tuple=None ) -> List[str]:
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
__UpperCAmelCase : Any = "CPUExecutionProvider"
return ort.InferenceSession(__lowerCamelCase , providers=[provider] , sess_options=__lowerCamelCase )
def _lowerCamelCase ( self: Dict , __lowerCamelCase: Union[str, Path] , __lowerCamelCase: Optional[str] = None , **__lowerCamelCase: Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__UpperCAmelCase : str = self.model_save_dir.joinpath(self.latest_model_name )
__UpperCAmelCase : Any = Path(__lowerCamelCase ).joinpath(__lowerCamelCase )
try:
shutil.copyfile(__lowerCamelCase , __lowerCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__UpperCAmelCase : str = self.model_save_dir.joinpath(__lowerCamelCase )
if src_path.exists():
__UpperCAmelCase : List[str] = Path(__lowerCamelCase ).joinpath(__lowerCamelCase )
try:
shutil.copyfile(__lowerCamelCase , __lowerCamelCase )
except shutil.SameFileError:
pass
def _lowerCamelCase ( self: Any , __lowerCamelCase: Union[str, os.PathLike] , **__lowerCamelCase: Any , ) -> List[Any]:
if os.path.isfile(__lowerCamelCase ):
logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
# saving model weights/files
self._save_pretrained(__lowerCamelCase , **__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Optional[Any] , __lowerCamelCase: Union[str, Path] , __lowerCamelCase: Optional[Union[bool, str, None]] = None , __lowerCamelCase: Optional[Union[str, None]] = None , __lowerCamelCase: bool = False , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional["ort.SessionOptions"] = None , **__lowerCamelCase: Union[str, Any] , ) -> Optional[Any]:
__UpperCAmelCase : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__lowerCamelCase ):
__UpperCAmelCase : Optional[int] = OnnxRuntimeModel.load_model(
os.path.join(__lowerCamelCase , __lowerCamelCase ) , provider=__lowerCamelCase , sess_options=__lowerCamelCase )
__UpperCAmelCase : Union[str, Any] = Path(__lowerCamelCase )
# load model from hub
else:
# download model
__UpperCAmelCase : Optional[Any] = hf_hub_download(
repo_id=__lowerCamelCase , filename=__lowerCamelCase , use_auth_token=__lowerCamelCase , revision=__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , )
__UpperCAmelCase : Any = Path(__lowerCamelCase ).parent
__UpperCAmelCase : List[Any] = Path(__lowerCamelCase ).name
__UpperCAmelCase : Dict = OnnxRuntimeModel.load_model(__lowerCamelCase , provider=__lowerCamelCase , sess_options=__lowerCamelCase )
return cls(model=__lowerCamelCase , **__lowerCamelCase )
@classmethod
def _lowerCamelCase ( cls: Optional[int] , __lowerCamelCase: Union[str, Path] , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = None , **__lowerCamelCase: Tuple , ) -> Optional[Any]:
__UpperCAmelCase : int = None
if len(str(__lowerCamelCase ).split("@" ) ) == 2:
__UpperCAmelCase , __UpperCAmelCase : Any = model_id.split("@" )
return cls._from_pretrained(
model_id=__lowerCamelCase , revision=__lowerCamelCase , cache_dir=__lowerCamelCase , force_download=__lowerCamelCase , use_auth_token=__lowerCamelCase , **__lowerCamelCase , )
| 157 |
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup="import __main__")
        print(f'''{call:56} = {func(value)} -- {timing:.4f} seconds''')

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
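# Editor-added sketch: all three variants agree on a couple of assumed inputs.
assert sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15
assert sum_of_digits(-904) == 13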
| 157 | 1 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[int] = """hf-internal-testing/tiny-random-t5"""
__snake_case: Optional[int] = AutoTokenizer.from_pretrained(A )
__snake_case: List[Any] = AutoModelForSeqaSeqLM.from_pretrained(A )
__snake_case: Dict = tokenizer("""This is me""" , return_tensors="""pt""" )
__snake_case: Tuple = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__snake_case: Any = model.generate(**A )
__snake_case: List[str] = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A )
__snake_case: int = AutoModelForSeqaSeqLM.from_pretrained(A )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__snake_case: int = model_reloaded.generate(**A )
self.assertTrue(torch.allclose(A , A ) )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = """hf-internal-testing/tiny-random-t5"""
__snake_case: int = AutoModelForSeqaSeqLM.from_pretrained(A )
__snake_case: List[str] = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(A ):
model.save_pretrained(A )
__snake_case: List[Any] = model.reverse_bettertransformer()
model.save_pretrained(A )
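# Editor's note (hedged recap of the round-trip these tests assert, not new API):
#     model = model.to_bettertransformer()       # swap in fused attention kernels
#     model = model.reverse_bettertransformer()  # restore the canonical weights
# save_pretrained is only valid on the reversed (canonical) model, which is
# exactly what the second test checks.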
| 293 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : int = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """rwkv"""
lowerCAmelCase__ = {"""max_position_embeddings""": """context_length"""}
def __init__( self : Dict , A : List[Any]=50_277 , A : List[Any]=1_024 , A : Union[str, Any]=4_096 , A : Tuple=32 , A : List[Any]=None , A : Tuple=None , A : Tuple=1E-5 , A : int=0 , A : Optional[int]=0 , A : Dict=6 , A : Dict=False , A : int=True , **A : List[Any] , ):
__snake_case: Tuple = vocab_size
__snake_case: Any = context_length
__snake_case: Dict = hidden_size
__snake_case: Dict = num_hidden_layers
__snake_case: Union[str, Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
__snake_case: str = intermediate_size if intermediate_size is not None else 4 * hidden_size
__snake_case: Any = layer_norm_epsilon
__snake_case: int = rescale_every
__snake_case: str = use_cache
__snake_case: Dict = bos_token_id
__snake_case: Union[str, Any] = eos_token_id
super().__init__(
tie_word_embeddings=A , bos_token_id=A , eos_token_id=A , **A )
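# Editor's note (minimal usage sketch, assuming this class is exposed as
# transformers.RwkvConfig):
#     config = RwkvConfig(vocab_size=50_277, context_length=1_024)
#     config.save_pretrained("./rwkv-config")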
| 293 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
A: Optional[Any] = "http://www.mocksite.com/file1.txt"
A: Union[str, Any] = "\"text\": [\"foo\", \"foo\"]"
A: List[Any] = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class SCREAMING_SNAKE_CASE__ :
__lowerCAmelCase : Dict = 200
__lowerCAmelCase : Optional[int] = {'Content-Length': '100'}
__lowerCAmelCase : Dict = {}
def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
return [bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )]
def _snake_case ( *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ):
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def _snake_case ( UpperCamelCase : List[str] , UpperCamelCase : Dict , UpperCamelCase : int ):
import requests
monkeypatch.setattr(UpperCamelCase , """request""" , UpperCamelCase )
UpperCAmelCase : int = URL
if issubclass(UpperCamelCase , UpperCamelCase ):
UpperCAmelCase : Tuple = url
elif issubclass(UpperCamelCase , UpperCamelCase ):
UpperCAmelCase : Any = [url]
elif issubclass(UpperCamelCase , UpperCamelCase ):
UpperCAmelCase : Any = {"""train""": url}
UpperCAmelCase : Tuple = """dummy"""
UpperCAmelCase : Dict = """downloads"""
UpperCAmelCase : Optional[Any] = tmp_path
UpperCAmelCase : Optional[int] = DownloadConfig(
cache_dir=os.path.join(UpperCamelCase , UpperCamelCase ) , use_etag=UpperCamelCase , )
UpperCAmelCase : str = DownloadManager(dataset_name=UpperCamelCase , download_config=UpperCamelCase )
UpperCAmelCase : Optional[Any] = dl_manager.download(UpperCamelCase )
UpperCAmelCase : List[str] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(UpperCamelCase , UpperCamelCase ):
UpperCAmelCase : int = [downloaded_paths]
UpperCAmelCase : Dict = [urls]
elif isinstance(UpperCamelCase , UpperCamelCase ):
assert "train" in downloaded_paths.keys()
UpperCAmelCase : Tuple = downloaded_paths.values()
UpperCAmelCase : Optional[Any] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(UpperCamelCase , UpperCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
UpperCAmelCase : int = Path(UpperCamelCase )
UpperCAmelCase : List[str] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
UpperCAmelCase : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
UpperCAmelCase : Dict = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
UpperCAmelCase : int = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def _snake_case ( UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ):
UpperCAmelCase : Tuple = str(UpperCamelCase )
if issubclass(UpperCamelCase , UpperCamelCase ):
UpperCAmelCase : Dict = filename
elif issubclass(UpperCamelCase , UpperCamelCase ):
UpperCAmelCase : Optional[int] = [filename]
elif issubclass(UpperCamelCase , UpperCamelCase ):
UpperCAmelCase : Dict = {"""train""": filename}
UpperCAmelCase : str = """dummy"""
UpperCAmelCase : Tuple = xz_file.parent
UpperCAmelCase : int = """extracted"""
UpperCAmelCase : Tuple = DownloadConfig(
cache_dir=UpperCamelCase , use_etag=UpperCamelCase , )
UpperCAmelCase : Dict = DownloadManager(dataset_name=UpperCamelCase , download_config=UpperCamelCase )
UpperCAmelCase : Tuple = dl_manager.extract(UpperCamelCase )
UpperCAmelCase : str = paths
for extracted_paths in [extracted_paths]:
if isinstance(UpperCamelCase , UpperCamelCase ):
UpperCAmelCase : Optional[int] = [extracted_paths]
UpperCAmelCase : int = [paths]
elif isinstance(UpperCamelCase , UpperCamelCase ):
assert "train" in extracted_paths.keys()
UpperCAmelCase : Union[str, Any] = extracted_paths.values()
UpperCAmelCase : Tuple = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(UpperCamelCase , UpperCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
UpperCAmelCase : Optional[Any] = Path(UpperCamelCase )
UpperCAmelCase : Optional[int] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(UpperCamelCase , etag=UpperCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
UpperCAmelCase : int = extracted_path.read_text()
UpperCAmelCase : Tuple = text_file.read_text()
assert extracted_file_content == expected_file_content
def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Dict ):
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(UpperCamelCase , start=1 ):
UpperCAmelCase : Optional[int] = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def _snake_case ( UpperCamelCase : int , UpperCamelCase : Tuple ):
UpperCAmelCase : Dict = request.getfixturevalue(UpperCamelCase )
UpperCAmelCase : int = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(UpperCamelCase ) , start=1 ):
_test_jsonl(UpperCamelCase , UpperCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def _snake_case ( UpperCamelCase : Optional[Any] , UpperCamelCase : int ):
UpperCAmelCase : Optional[int] = request.getfixturevalue(UpperCamelCase )
UpperCAmelCase : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(UpperCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(UpperCamelCase ) , start=1 ):
_test_jsonl(UpperCamelCase , UpperCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def _snake_case ( UpperCamelCase : str ):
UpperCAmelCase : str = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(UpperCamelCase ) , start=1 ):
assert os.path.basename(UpperCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
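# Editor's note (hedged sketch of the API under test; the URL is the mock
# address defined at the top of this file):
#     dl_manager = DownloadManager(dataset_name="dummy")
#     local_path = dl_manager.download("http://www.mocksite.com/file1.txt")
#     extracted_path = dl_manager.extract(local_path)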
| 109 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A: Tuple = logging.get_logger(__name__)
A: List[Any] = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : int = 'blenderbot-small'
__lowerCAmelCase : Any = ['past_key_values']
__lowerCAmelCase : str = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _SCREAMING_SNAKE_CASE=50265 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , ) -> str:
'''simple docstring'''
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : str = d_model
UpperCAmelCase : List[Any] = encoder_ffn_dim
UpperCAmelCase : Union[str, Any] = encoder_layers
UpperCAmelCase : List[str] = encoder_attention_heads
UpperCAmelCase : Optional[Any] = decoder_ffn_dim
UpperCAmelCase : List[Any] = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = dropout
UpperCAmelCase : Union[str, Any] = attention_dropout
UpperCAmelCase : int = activation_dropout
UpperCAmelCase : int = activation_function
UpperCAmelCase : Optional[Any] = init_std
UpperCAmelCase : List[str] = encoder_layerdrop
UpperCAmelCase : List[str] = decoder_layerdrop
UpperCAmelCase : List[str] = use_cache
UpperCAmelCase : Union[str, Any] = encoder_layers
UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , forced_eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase : int = {0: """batch"""}
UpperCAmelCase : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""}
UpperCAmelCase : Any = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase , UpperCAmelCase : Tuple = self.num_layers
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
UpperCAmelCase : Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : Union[str, Any] = super().outputs
else:
UpperCAmelCase : int = super(_SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
UpperCAmelCase , UpperCAmelCase : Any = self.num_layers
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Generate decoder inputs
UpperCAmelCase : Optional[Any] = seq_length if not self.use_past else 1
UpperCAmelCase : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase : Union[str, Any] = dict(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase , UpperCAmelCase : int = common_inputs["""input_ids"""].shape
UpperCAmelCase : Any = common_inputs["""decoder_input_ids"""].shape[1]
UpperCAmelCase , UpperCAmelCase : str = self.num_attention_heads
UpperCAmelCase : Union[str, Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase : Union[str, Any] = decoder_seq_length + 3
UpperCAmelCase : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase : Optional[Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCAmelCase : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase , UpperCAmelCase : List[Any] = self.num_layers
UpperCAmelCase : Optional[Any] = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) - min_num_layers
UpperCAmelCase : Optional[int] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
UpperCAmelCase : Dict = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase , UpperCAmelCase : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCAmelCase : Union[str, Any] = seqlen + 2
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.num_layers
UpperCAmelCase , UpperCAmelCase : Dict = self.num_attention_heads
UpperCAmelCase : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase : List[Any] = common_inputs["""attention_mask"""].dtype
UpperCAmelCase : Any = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCAmelCase : List[str] = [
(torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) for _ in range(_SCREAMING_SNAKE_CASE )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase : str = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase : List[str] = tokenizer.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase : Dict = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase : Optional[int] = dict(tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
elif self.task == "causal-lm":
UpperCAmelCase : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : List[Any] = super()._flatten_past_key_values_(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Dict = super(_SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
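# Editor's note (assumed export flow; upstream names this class
# BlenderbotSmallOnnxConfig, and the call mirrors the dummy-input method above):
#     onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)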
| 109 | 1 |
from __future__ import annotations

import queue


class TreeNode:
    '''simple docstring'''

    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree():
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f'Enter the left node of {node_found.data}: '
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f'Enter the right node of {node_found.data}: '
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stackb.append(n)
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data, end=',')


def prompt(s: str = '', width=50, char='*') -> str:
    if not s:
        return '\n' + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
node = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 5_0 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
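# Editor-added reference output for an assumed three-node tree
# (root 1, left child 2, right child 3):
#     pre_order   -> 1,2,3,
#     in_order    -> 2,1,3,
#     post_order  -> 2,3,1,
#     level_order -> 1,2,3,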
| 371 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : List[str] = np.inf
def set_batch_size(SCREAMING_SNAKE_CASE__ ) -> None:
nonlocal batch_size
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Optional[Any] = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and feature.dtype == "binary":
__lowerCamelCase : List[str] = min(SCREAMING_SNAKE_CASE__ , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return None if batch_size is np.inf else batch_size
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Tuple , a: NestedDataStructureLike[PathLike] , a: Optional[NamedSplit] = None , a: Optional[Features] = None , a: str = None , a: bool = False , a: bool = False , a: Optional[int] = None , **a: Optional[Any] , ):
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
__lowerCamelCase : List[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
__lowerCamelCase : Optional[Any] = _PACKAGED_DATASETS_MODULES['parquet'][1]
__lowerCamelCase : List[str] = Parquet(
cache_dir=a , data_files=a , features=a , hash=a , **a , )
def _snake_case ( self: List[str] ):
# Build iterable dataset
if self.streaming:
__lowerCamelCase : str = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowerCamelCase : str = None
__lowerCamelCase : Optional[Any] = None
__lowerCamelCase : Union[str, Any] = None
__lowerCamelCase : int = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
__lowerCamelCase : Tuple = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class A_ :
'''simple docstring'''
def __init__( self: Optional[int] , a: Dataset , a: Union[PathLike, BinaryIO] , a: Optional[int] = None , **a: List[Any] , ):
__lowerCamelCase : Optional[int] = dataset
__lowerCamelCase : List[Any] = path_or_buf
__lowerCamelCase : List[str] = batch_size or get_writer_batch_size(dataset.features )
__lowerCamelCase : List[Any] = parquet_writer_kwargs
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Optional[int] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , 'wb+' ) as buffer:
__lowerCamelCase : Optional[int] = self._write(file_obj=a , batch_size=a , **self.parquet_writer_kwargs )
else:
__lowerCamelCase : Any = self._write(file_obj=self.path_or_buf , batch_size=a , **self.parquet_writer_kwargs )
return written
def _snake_case ( self: Optional[int] , a: BinaryIO , a: int , **a: str ):
__lowerCamelCase : Dict = 0
__lowerCamelCase : Union[str, Any] = parquet_writer_kwargs.pop('path_or_buf' , a )
__lowerCamelCase : str = self.dataset.features.arrow_schema
__lowerCamelCase : Any = pq.ParquetWriter(a , schema=a , **a )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , a ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ):
__lowerCamelCase : Any = query_table(
table=self.dataset._data , key=slice(a , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(a )
written += batch.nbytes
writer.close()
return written
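# Editor's note (hedged usage sketch; upstream these classes are named
# ParquetDatasetReader and ParquetDatasetWriter, and the paths are assumptions):
#     ds = ParquetDatasetReader("data.parquet", split="train").read()
#     ParquetDatasetWriter(ds, "out.parquet", batch_size=1_000).write()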
| 194 | 0 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        raise TypeError("""Parameter number must be int""")
    if number < 0:
        raise ValueError("""Parameter number must be greater than or equal to 0""")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    '''simple docstring'''
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("""Parameters chain_length and number_limit must be int""")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            """Parameters chain_length and number_limit must be greater than 0""")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        # cache the size of the chain that starts at this element
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution()}")
| 345 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]
if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
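# A minimal sketch of the lazy-import idea used above (simplified; this is not
# transformers' actual _LazyModule implementation): attribute access triggers the
# real import of the submodule that defines the requested symbol.
import importlib

def lazy_getattr(import_structure: dict, package: str):
    def __getattr__(name: str):
        for module_name, symbols in import_structure.items():
            if name in symbols:
                # import the submodule relative to `package` only when first needed
                module = importlib.import_module(f".{module_name}", package)
                return getattr(module, name)
        raise AttributeError(f"module {package!r} has no attribute {name!r}")
    return __getattr__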
| 307 | 0 |
from __future__ import annotations
from typing import Any

def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluates an arithmetic expression given in postfix (reverse Polish) notation."""
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()

if __name__ == "__main__":
    import doctest
    doctest.testmod()
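# Example, assuming the fixed evaluate_postfix above: "2 1 + 3 *" means (2 + 1) * 3.
assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9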
| 285 |
from collections.abc import Callable

import numpy as np

def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solves an ODE with the modified Euler (Heun) predictor-corrector method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])  # Euler predictor step
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )  # trapezoidal corrector step
        x += step_size
    return y

if __name__ == "__main__":
    import doctest
    doctest.testmod()
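# Example, assuming euler_modified above: integrate y' = y on [0, 1] with y(0) = 1.
# The true value at x = 1 is e; this second-order method lands close with h = 0.1.
import math

approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
assert abs(approx[-1] - math.e) < 0.01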
| 285 | 1 |
from math import ceil, sqrt

def solution(limit: int = 1000000) -> int:
    """Counts the square laminae that use no more than ``limit`` tiles (Project Euler 173)."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer

if __name__ == "__main__":
    print(f"{solution() = }")
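# Quick check, assuming the fixed solution above: Project Euler 173 states that exactly
# forty-one distinct square laminae can be formed using up to one hundred tiles.
assert solution(100) == 41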
| 343 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image

@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
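# A usage note (assuming a transformers source checkout with the test fixtures present):
# the suites above are collected by pytest, e.g.
#   pytest tests/models/bit/test_modeling_bit.py -k "test_model or test_backbone"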
| 343 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments

logger = logging.getLogger(__name__)

@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
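# A hedged construction sketch: the dataclass extends TrainingArguments, so the usual
# arguments (such as output_dir) still apply; the field names follow the reconstruction above.
args = Seq2SeqTrainingArguments(
    output_dir="./out",
    label_smoothing=0.1,
    predict_with_generate=True,
)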
| 306 |
def solution(pence: int = 200) -> int:
    """Counts the ways to make ``pence`` pence from standard British coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]

if __name__ == "__main__":
    assert solution(200) == 73682
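# Small worked example, assuming the fixed solution above: 5 pence can be made in 4 ways
# (5; 2+2+1; 2+1+1+1; 1+1+1+1+1).
assert solution(5) == 4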
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__A = logging.get_logger(__name__)
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
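# Illustrative sketch only (not runnable outside the transformers package, since the class
# uses relative imports): instantiating the shim emits the FutureWarning while otherwise
# behaving exactly like VideoMAEImageProcessor.
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     extractor = VideoMAEFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)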
| 293 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_0_0_8)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_8)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
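# A hedged usage sketch of the tokenizer under test (network access and the
# facebook/xglm-564M checkpoint are assumed):
# from transformers import XGLMTokenizer
# tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
# print(tok.encode("Hello World!"))  # the easy-symbols test above expects [2, 31227, 4447, 35]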
| 293 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule

_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 284 |
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments

if is_tf_available():
    import tensorflow as tf

logger = logging.get_logger(__name__)

@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
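# A hedged usage sketch (field names as reconstructed above; `models`, `batch_sizes`
# and `sequence_lengths` are assumed to come from the parent BenchmarkArguments):
args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[8],
    sequence_lengths=[128],
    eager_mode=True,
)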
| 284 | 1 |