code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
def a__ ( lowercase__ ):
    """Return the mean absolute deviation of a list of numbers.

    Args:
        lowercase__: a non-empty iterable of numbers.

    Returns:
        float: average absolute distance of each element from the mean.

    Raises:
        ValueError: if the input list is empty.
    """
    nums = list(lowercase__)
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
# Run this module's doctests when the file is executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 54 |
'''simple docstring'''
def UpperCAmelCase_ ( __lowerCamelCase : int ) -> int:
    """Return the number of set bits (Hamming weight / popcount) of a
    non-negative integer.

    Args:
        __lowerCamelCase: the integer whose binary ones are counted.

    Returns:
        int: how many ``1`` bits appear in ``bin(__lowerCamelCase)``.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is negative.
    """
    # Type check first: comparing a non-int with 0 could itself raise.
    if not isinstance(__lowerCamelCase, int):
        raise TypeError("Input value must be a 'int' type")
    if __lowerCamelCase < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(__lowerCamelCase).count("1")
# Run this module's doctests when the file is executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 172 | 0 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _a ( lowerCAmelCase):
    """Decoding granularities used by the MGP-STR processor below.

    NOTE(review): this file looks machine-obfuscated.  The base class name
    `lowerCAmelCase` is not defined here (the `ExplicitEnum` import above
    suggests it should be `ExplicitEnum` -- confirm against upstream), and all
    three members share the name `UpperCamelCase__`, so as written only the
    last assignment ("wp") survives.
    """

    UpperCamelCase__ = """char"""
    UpperCamelCase__ = """bpe"""
    UpperCamelCase__ = """wp"""
__A : Union[str, Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _a ( lowerCAmelCase):
    """Processor wrapping a ViT image processor and three tokenizers
    (character, BPE, WordPiece) for MGP-STR style scene-text recognition.

    NOTE(review): the file looks machine-obfuscated: assignment targets are
    uniformly `_UpperCAmelCase` while later reads use the intended names
    (`image_processor`, `tokenizer`, `inputs`, ...), the three class
    attributes share one name, and the base class `lowerCAmelCase` is
    undefined here (presumably `ProcessorMixin` from the import above --
    confirm against upstream).  Code is documented as written.
    """

    UpperCamelCase__ = ["""image_processor""", """char_tokenizer"""]
    UpperCamelCase__ = """ViTImageProcessor"""
    UpperCamelCase__ = """MgpstrTokenizer"""

    def __init__( self : List[str] , __UpperCamelCase : Dict=None , __UpperCamelCase : int=None , **__UpperCamelCase : Dict )->Union[str, Any]:
        # Accept the deprecated `feature_extractor` kwarg as a fallback for
        # `image_processor`, with a deprecation warning.
        _UpperCAmelCase = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , __UpperCamelCase , )
            _UpperCAmelCase = kwargs.pop('''feature_extractor''' )
        _UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        # Character tokenizer plus auxiliary BPE (gpt2) and WordPiece
        # (bert-base-uncased) tokenizers for multi-granularity decoding.
        _UpperCAmelCase = tokenizer
        _UpperCAmelCase = AutoTokenizer.from_pretrained('''gpt2''' )
        _UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
        super().__init__(__UpperCamelCase , __UpperCamelCase )

    def __call__( self : Any , __UpperCamelCase : str=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=None , **__UpperCamelCase : List[str] )->List[Any]:
        # Process images and/or text; when both are given the tokenized text
        # ids are attached to the image features before returning.
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            _UpperCAmelCase = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
        if text is not None:
            _UpperCAmelCase = self.char_tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            _UpperCAmelCase = encodings['''input_ids''']
            return inputs

    def lowercase__ ( self : str , __UpperCamelCase : Dict )->str:
        # Batch-decode (char, bpe, wp) logit triples and keep, per sample,
        # the decoding whose confidence score is highest.
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = sequences
        _UpperCAmelCase = char_preds.size(0 )
        _UpperCAmelCase , _UpperCAmelCase = self._decode_helper(__UpperCamelCase , '''char''' )
        _UpperCAmelCase , _UpperCAmelCase = self._decode_helper(__UpperCamelCase , '''bpe''' )
        _UpperCAmelCase , _UpperCAmelCase = self._decode_helper(__UpperCamelCase , '''wp''' )
        _UpperCAmelCase = []
        _UpperCAmelCase = []
        for i in range(__UpperCamelCase ):
            _UpperCAmelCase = [char_scores[i], bpe_scores[i], wp_scores[i]]
            _UpperCAmelCase = [char_strs[i], bpe_strs[i], wp_strs[i]]
            _UpperCAmelCase = scores.index(max(__UpperCamelCase ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        _UpperCAmelCase = {}
        _UpperCAmelCase = final_strs
        _UpperCAmelCase = final_scores
        _UpperCAmelCase = char_strs
        _UpperCAmelCase = bpe_strs
        _UpperCAmelCase = wp_strs
        return out

    def lowercase__ ( self : int , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] )->List[str]:
        # Greedy-decode one granularity ("char" | "bpe" | "wp"): take the
        # per-position argmax, cut each string at that granularity's EOS
        # token, and score a sequence by the cumulative product of its
        # per-step max probabilities.
        if format == DecodeType.CHARACTER:
            _UpperCAmelCase = self.char_decode
            _UpperCAmelCase = 1
            _UpperCAmelCase = '''[s]'''
        elif format == DecodeType.BPE:
            _UpperCAmelCase = self.bpe_decode
            _UpperCAmelCase = 2
            _UpperCAmelCase = '''#'''
        elif format == DecodeType.WORDPIECE:
            _UpperCAmelCase = self.wp_decode
            _UpperCAmelCase = 1_0_2
            _UpperCAmelCase = '''[SEP]'''
        else:
            raise ValueError(F'Format {format} is not supported.' )
        _UpperCAmelCase , _UpperCAmelCase = [], []
        _UpperCAmelCase = pred_logits.size(0 )
        _UpperCAmelCase = pred_logits.size(1 )
        _UpperCAmelCase , _UpperCAmelCase = pred_logits.topk(1 , dim=-1 , largest=__UpperCamelCase , sorted=__UpperCamelCase )
        _UpperCAmelCase = preds_index.view(-1 , __UpperCamelCase )[:, 1:]
        _UpperCAmelCase = decoder(__UpperCamelCase )
        _UpperCAmelCase , _UpperCAmelCase = torch.nn.functional.softmax(__UpperCamelCase , dim=2 ).max(dim=2 )
        _UpperCAmelCase = preds_max_prob[:, 1:]
        for index in range(__UpperCamelCase ):
            _UpperCAmelCase = preds_str[index].find(__UpperCamelCase )
            _UpperCAmelCase = preds_str[index][:pred_eos]
            _UpperCAmelCase = preds_index[index].cpu().tolist()
            _UpperCAmelCase = pred_index.index(__UpperCamelCase ) if eos_token in pred_index else -1
            _UpperCAmelCase = preds_max_prob[index][: pred_eos_index + 1]
            _UpperCAmelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(__UpperCamelCase )
            conf_scores.append(__UpperCamelCase )
        return dec_strs, conf_scores

    def lowercase__ ( self : Tuple , __UpperCamelCase : Union[str, Any] )->List[Any]:
        # Character-level decode; strips the spaces batch_decode inserts.
        _UpperCAmelCase = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(__UpperCamelCase )]
        return decode_strs

    def lowercase__ ( self : List[Any] , __UpperCamelCase : Optional[int] )->Union[str, Any]:
        # BPE-level decode via the gpt2 tokenizer.
        return self.bpe_tokenizer.batch_decode(__UpperCamelCase )

    def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : List[str] )->Tuple:
        # WordPiece-level decode; strips the spaces batch_decode inserts.
        _UpperCAmelCase = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(__UpperCamelCase )]
        return decode_strs
| 710 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
    """Seed every random number generator in use (python's `random`, NumPy,
    torch CPU, and all CUDA devices) so results are reproducible."""
    seed_value = _SCREAMING_SNAKE_CASE
    # torch.cuda.manual_seed_all is safe to call even when CUDA is absent.
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed_value)
class _a :
    """Exponential moving average (EMA) of a set of `torch.nn.Parameter`s,
    with warmup-controlled decay, (de)serialization helpers, and store/restore
    support.

    NOTE(review): the file looks machine-obfuscated.  Assignment targets are
    uniformly `_UpperCAmelCase` while later reads use the intended names
    (`parameters`, `decay`, `self.shadow_params`, ...), so the assignments as
    written do not bind what the reads expect; also `1_0` (== 10) and
    `is_deepspeed_zeroa_enabled` appear to be mangled literals/names --
    confirm against the upstream (diffusers EMAModel) source.  Documented as
    written.
    """

    def __init__( self : Optional[Any] , __UpperCamelCase : Iterable[torch.nn.Parameter] , __UpperCamelCase : float = 0.9_9_9_9 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 0 , __UpperCamelCase : bool = False , __UpperCamelCase : Union[float, int] = 1.0 , __UpperCamelCase : Union[float, int] = 2 / 3 , __UpperCamelCase : Optional[Any] = None , __UpperCamelCase : Dict[str, Any] = None , **__UpperCamelCase : Optional[Any] , )->Tuple:
        # Back-compat path: a whole Module instead of its parameters.
        if isinstance(__UpperCamelCase , torch.nn.Module ):
            _UpperCAmelCase = (
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase , )
            _UpperCAmelCase = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            _UpperCAmelCase = True
        # Deprecated kwargs: max_value -> decay, min_value -> min_decay.
        if kwargs.get('''max_value''' , __UpperCamelCase ) is not None:
            _UpperCAmelCase = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
            deprecate('''max_value''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
            _UpperCAmelCase = kwargs['''max_value''']
        if kwargs.get('''min_value''' , __UpperCamelCase ) is not None:
            _UpperCAmelCase = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
            deprecate('''min_value''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
            _UpperCAmelCase = kwargs['''min_value''']
        # Shadow params start as detached clones of the tracked parameters.
        _UpperCAmelCase = list(__UpperCamelCase )
        _UpperCAmelCase = [p.clone().detach() for p in parameters]
        if kwargs.get('''device''' , __UpperCamelCase ) is not None:
            _UpperCAmelCase = '''The `device` argument is deprecated. Please use `to` instead.'''
            deprecate('''device''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase )
            self.to(device=kwargs['''device'''] )
        _UpperCAmelCase = None
        _UpperCAmelCase = decay
        _UpperCAmelCase = min_decay
        _UpperCAmelCase = update_after_step
        _UpperCAmelCase = use_ema_warmup
        _UpperCAmelCase = inv_gamma
        _UpperCAmelCase = power
        _UpperCAmelCase = 0
        _UpperCAmelCase = None  # set in `step()`
        _UpperCAmelCase = model_cls
        _UpperCAmelCase = model_config

    @classmethod
    def lowercase__ ( cls : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : Any )->"EMAModel":
        # Alternate constructor: rebuild the EMA tracker from a pretrained
        # checkpoint of `model_cls`.
        _UpperCAmelCase , _UpperCAmelCase = model_cls.load_config(__UpperCamelCase , return_unused_kwargs=__UpperCamelCase )
        _UpperCAmelCase = model_cls.from_pretrained(__UpperCamelCase )
        _UpperCAmelCase = cls(model.parameters() , model_cls=__UpperCamelCase , model_config=model.config )
        ema_model.load_state_dict(__UpperCamelCase )
        return ema_model

    def lowercase__ ( self : str , __UpperCamelCase : Any )->Optional[Any]:
        # Save the EMA weights as a pretrained `model_cls` checkpoint.
        if self.model_cls is None:
            raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
        if self.model_config is None:
            raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
        _UpperCAmelCase = self.model_cls.from_config(self.model_config )
        _UpperCAmelCase = self.state_dict()
        state_dict.pop('''shadow_params''' , __UpperCamelCase )
        model.register_to_config(**__UpperCamelCase )
        self.copy_to(model.parameters() )
        model.save_pretrained(__UpperCamelCase )

    def lowercase__ ( self : int , __UpperCamelCase : int )->float:
        # Decay schedule: 0 before `update_after_step`, then either the EMA
        # warmup curve or (1+step)/(10+step), clamped to [min_decay, decay].
        _UpperCAmelCase = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            _UpperCAmelCase = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            _UpperCAmelCase = (1 + step) / (1_0 + step)
        _UpperCAmelCase = min(__UpperCamelCase , self.decay )
        # make sure decay is not smaller than min_decay
        _UpperCAmelCase = max(__UpperCamelCase , self.min_decay )
        return cur_decay_value

    @torch.no_grad()
    def lowercase__ ( self : List[Any] , __UpperCamelCase : Iterable[torch.nn.Parameter] )->Optional[Any]:
        # One EMA update: shadow <- shadow - (1 - decay) * (shadow - param).
        if isinstance(__UpperCamelCase , torch.nn.Module ):
            _UpperCAmelCase = (
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , __UpperCamelCase , standard_warn=__UpperCamelCase , )
            _UpperCAmelCase = parameters.parameters()
        _UpperCAmelCase = list(__UpperCamelCase )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        _UpperCAmelCase = self.get_decay(self.optimization_step )
        _UpperCAmelCase = decay
        _UpperCAmelCase = 1 - decay
        _UpperCAmelCase = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params , __UpperCamelCase ):
            # Under DeepSpeed ZeRO, gather the partitioned parameter first.
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
                _UpperCAmelCase = deepspeed.zero.GatheredParameters(__UpperCamelCase , modifier_rank=__UpperCamelCase )
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(__UpperCamelCase )

    def lowercase__ ( self : List[Any] , __UpperCamelCase : Iterable[torch.nn.Parameter] )->None:
        # Copy the EMA (shadow) weights into the given parameters in place.
        _UpperCAmelCase = list(__UpperCamelCase )
        for s_param, param in zip(self.shadow_params , __UpperCamelCase ):
            param.data.copy_(s_param.to(param.device ).data )

    def lowercase__ ( self : List[str] , __UpperCamelCase : str=None , __UpperCamelCase : Union[str, Any]=None )->None:
        # Move/cast shadow params; dtype only applies to floating tensors.
        _UpperCAmelCase = [
            p.to(device=__UpperCamelCase , dtype=__UpperCamelCase ) if p.is_floating_point() else p.to(device=__UpperCamelCase )
            for p in self.shadow_params
        ]

    def lowercase__ ( self : Optional[Any] )->dict:
        # Serializable snapshot of the tracker's configuration and weights.
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def lowercase__ ( self : Any , __UpperCamelCase : Iterable[torch.nn.Parameter] )->None:
        # Temporarily stash the current (non-EMA) weights on CPU.
        _UpperCAmelCase = [param.detach().cpu().clone() for param in parameters]

    def lowercase__ ( self : List[Any] , __UpperCamelCase : Iterable[torch.nn.Parameter] )->None:
        # Restore weights previously saved via store(); frees the stash after.
        if self.temp_stored_params is None:
            raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
        for c_param, param in zip(self.temp_stored_params , __UpperCamelCase ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        _UpperCAmelCase = None

    def lowercase__ ( self : Any , __UpperCamelCase : dict )->None:
        # Load configuration and (optionally) shadow weights, validating each
        # field's type/range before accepting it.
        _UpperCAmelCase = copy.deepcopy(__UpperCamelCase )
        _UpperCAmelCase = state_dict.get('''decay''' , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError('''Decay must be between 0 and 1''' )
        _UpperCAmelCase = state_dict.get('''min_decay''' , self.min_decay )
        if not isinstance(self.min_decay , __UpperCamelCase ):
            raise ValueError('''Invalid min_decay''' )
        _UpperCAmelCase = state_dict.get('''optimization_step''' , self.optimization_step )
        if not isinstance(self.optimization_step , __UpperCamelCase ):
            raise ValueError('''Invalid optimization_step''' )
        _UpperCAmelCase = state_dict.get('''update_after_step''' , self.update_after_step )
        if not isinstance(self.update_after_step , __UpperCamelCase ):
            raise ValueError('''Invalid update_after_step''' )
        _UpperCAmelCase = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , __UpperCamelCase ):
            raise ValueError('''Invalid use_ema_warmup''' )
        _UpperCAmelCase = state_dict.get('''inv_gamma''' , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError('''Invalid inv_gamma''' )
        _UpperCAmelCase = state_dict.get('''power''' , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError('''Invalid power''' )
        _UpperCAmelCase = state_dict.get('''shadow_params''' , __UpperCamelCase )
        if shadow_params is not None:
            _UpperCAmelCase = shadow_params
            if not isinstance(self.shadow_params , __UpperCamelCase ):
                raise ValueError('''shadow_params must be a list''' )
            if not all(isinstance(__UpperCamelCase , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError('''shadow_params must all be Tensors''' )
| 95 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for the BioGPT sub-package: maps submodule name to the
# public names it exports.  The modeling entries are only added when torch is
# installed.
# NOTE(review): as originally written this block assigned the dict to an
# obfuscated name but passed the undefined `_import_structure` to
# `_LazyModule`, and the torch branch rebound the dict name instead of adding
# a "modeling_biogpt" key; both are restored here.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply do not export the modeling module.
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are imported only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311 | '''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_lowerCAmelCase :str = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase ( BenchmarkArguments ):
    """TensorFlow-specific benchmark arguments.

    Extends the shared `BenchmarkArguments` with TPU/XLA options and
    translates deprecated negated CLI flags (``no_cuda`` -> ``cuda=False``).

    NOTE(review): the original (obfuscated) version read `kwargs` in an
    ``__init__`` that only declared ``**lowercase__``, bound every field,
    attribute, and property to a single reused name, returned the never-bound
    locals `tpu`/`strategy`, and inherited from the undefined
    `_SCREAMING_SNAKE_CASE`; all of those NameError/shadowing defects are
    fixed here using the names the body itself reads.
    """

    # Deprecated negated flags still accepted for backward compatibility.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__( self , **kwargs ):
        """Translate deprecated `no_*` kwargs into their positive counterparts,
        pop the TF-specific options, and forward the rest to the base class."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop('tpu_name' , self.tpu_name )
        self.device_idx = kwargs.pop('device_idx' , self.device_idx )
        self.eager_mode = kwargs.pop('eager_mode' , self.eager_mode )
        self.use_xla = kwargs.pop('use_xla' , self.use_xla )
        super().__init__(**kwargs )

    tpu_name: str = field(
        default=None , metadata={"help": "Name of TPU"} , )
    device_idx: int = field(
        default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
    eager_mode: bool = field(default=False , metadata={"help": "Benchmark models in eager model."} )
    use_xla: bool = field(
        default=False , metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        } , )

    @cached_property
    def _setup_tpu( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        """Resolve the TPU cluster if TPU use is requested, else None."""
        requires_backends(self , ['tf'] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        """Build the tf.distribute strategy: TPU, single GPU, or CPU."""
        requires_backends(self , ['tf'] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
            else:
                tf.config.set_visible_devices([] , 'GPU' )  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
        return strategy

    @property
    def is_tpu( self ) -> bool:
        """True when a TPU cluster resolver was successfully set up."""
        requires_backends(self , ['tf'] )
        return self._setup_tpu is not None

    @property
    def strategy( self ) -> "tf.distribute.Strategy":
        requires_backends(self , ['tf'] )
        return self._setup_strategy

    @property
    def gpu_list( self ):
        """Physical GPU devices visible to TensorFlow."""
        requires_backends(self , ['tf'] )
        return tf.config.list_physical_devices('GPU' )

    @property
    def n_gpu( self ) -> int:
        """Number of usable GPUs (0 when CUDA benchmarking is disabled)."""
        requires_backends(self , ['tf'] )
        if self.cuda:
            return len(self.gpu_list )
        return 0

    @property
    def is_gpu( self ) -> bool:
        return self.n_gpu > 0
| 251 | 0 |
# Shared state for the digit-sum sequence solver below
# (a(m+1) = a(m) + digitsum(a(m))).
# NOTE(review): the original bound all three values to one obfuscated name
# while every function below reads `ks`, `base`, and `memo`; the read names
# are restored here.
ks = range(2, 20 + 1)  # exponents k for which "jumps" are memoised
base = [10**k for k in range(ks[-1] + 1)]  # base[k] == 10**k
memo: dict[int, dict[int, list[list[int]]]] = {}  # memo[digitsum(b)][c] -> cached jumps
def next_term(a_i, k, i, n):
    """Advance the little-endian digit list `a_i` along the sequence
    a(m+1) = a(m) + digitsum(a(m)), using memoised "jumps" keyed on the
    digit sum of the high part b (where a_i == b * 10**k + c).

    NOTE(review): the original definition repeated one obfuscated parameter
    name four times (a SyntaxError) and bound every local to `_UpperCAmelCase`;
    names are reconstructed from the reads in the body (`a_i`, `k`, `i`, `n`,
    `sub_memo`, `jumps`, ...).

    Returns:
        (diff, dn): total amount added over the jumped terms and the number
        of terms jumped.
    """
    # ds_b -> digitsum(b); c -> the low-order k digits as an integer.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    """Sequentially advance `a_i` (little-endian digits) from term `i` toward
    term `n`, only updating the low `k` digits; stops early when a carry
    spills past position k.

    NOTE(review): the original definition repeated one obfuscated parameter
    name four times (a SyntaxError); names are reconstructed from the reads
    in the body.

    Returns:
        (diff, terms_advanced): total amount added and number of terms
        actually advanced.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)

    return diff, i - start_i
def add(digits, k, addend):
    """Add `addend` into the little-endian digit list `digits`, starting at
    position `k`, propagating carries and appending new high digits as needed.

    NOTE(review): the original definition repeated one obfuscated parameter
    name three times (a SyntaxError); names are reconstructed from the reads
    in the body (`digits`, `addend`).
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    # Remaining carry grows the number: append its digits, lowest first.
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n=10**15):
    """Return a(n) of the sequence defined by a(1) = 1 and
    a(m+1) = a(m) + digitsum(a(m))  (Project Euler style problem).

    NOTE(review): the original bound this function and its helpers to one
    shared obfuscated name `_A` (later defs clobbering earlier ones) while the
    `__main__` guard below calls `solution()`; the read names are restored.
    """
    digits = [1]  # little-endian digits of the current term
    i = 1
    dn = 0  # terms advanced so far
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    # Reassemble the integer from its little-endian digits.
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
if __name__ == "__main__":
    # Debug-style f-string (`{expr = }`, Python 3.8+) prints the call and its
    # result.
    print(F"""{solution() = }""")
| 416 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( lowercase_ , unittest.TestCase ):
    """Unit tests for diffusers' `PriorTransformer` (driven by a model-tester
    mixin).

    NOTE(review): this file looks machine-obfuscated: every method is named
    `a_` (so later definitions shadow earlier ones), assignment targets are
    `_UpperCAmelCase` while reads use the intended names (`batch_size`,
    `model`, ...), and the mixin base `lowercase_` is undefined here
    (presumably `ModelTesterMixin` from the import above -- confirm).
    Code is documented as written.
    """

    SCREAMING_SNAKE_CASE_ : Optional[Any] = PriorTransformer
    SCREAMING_SNAKE_CASE_ : List[Any] = """hidden_states"""

    @property
    def a_ ( self : str ) -> Union[str, Any]:
        '''Random dummy inputs: hidden_states, timestep, proj_embedding, and
        encoder_hidden_states for a tiny prior model.'''
        _UpperCAmelCase : Optional[Any] = 4
        _UpperCAmelCase : int = 8
        _UpperCAmelCase : Any = 7
        _UpperCAmelCase : Optional[int] = floats_tensor((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
        _UpperCAmelCase : str = floats_tensor((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
        _UpperCAmelCase : List[str] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(UpperCAmelCase_ )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def a_ ( self : Any , UpperCAmelCase_ : List[Any]=0 ) -> Dict:
        '''Seeded variant of the dummy inputs for reproducible checks.'''
        torch.manual_seed(UpperCAmelCase_ )
        _UpperCAmelCase : Any = 4
        _UpperCAmelCase : int = 8
        _UpperCAmelCase : Optional[Any] = 7
        _UpperCAmelCase : Union[str, Any] = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
        _UpperCAmelCase : List[str] = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
        _UpperCAmelCase : Dict = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCAmelCase_ )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def a_ ( self : int ) -> str:
        '''Input shape expected by the mixin: (num_embeddings, embedding_dim).'''
        return (4, 8)

    @property
    def a_ ( self : Any ) -> int:
        '''Output shape expected by the mixin.'''
        return (4, 8)

    def a_ ( self : int ) -> Optional[int]:
        '''Tiny model config plus matching dummy inputs for common tests.'''
        _UpperCAmelCase : List[str] = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 4,
            '''num_layers''': 2,
            '''embedding_dim''': 8,
            '''num_embeddings''': 7,
            '''additional_embeddings''': 4,
        }
        _UpperCAmelCase : List[str] = self.dummy_input
        return init_dict, inputs_dict

    def a_ ( self : Any ) -> Optional[int]:
        '''Model loads from the hub with no missing keys and produces output.'''
        _UpperCAmelCase , _UpperCAmelCase : Any = PriorTransformer.from_pretrained(
            '''hf-internal-testing/prior-dummy''' , output_loading_info=UpperCAmelCase_ )
        self.assertIsNotNone(UpperCAmelCase_ )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
        model.to(UpperCAmelCase_ )
        _UpperCAmelCase : Any = model(**self.dummy_input )[0]
        assert hidden_states is not None, "Make sure output is not None"

    def a_ ( self : List[Any] ) -> Dict:
        '''forward() signature starts with (hidden_states, timestep).'''
        _UpperCAmelCase , _UpperCAmelCase : List[str] = self.prepare_init_args_and_inputs_for_common()
        _UpperCAmelCase : str = self.model_class(**UpperCAmelCase_ )
        _UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        _UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
        _UpperCAmelCase : List[str] = ['''hidden_states''', '''timestep''']
        self.assertListEqual(arg_names[:2] , UpperCAmelCase_ )

    def a_ ( self : Dict ) -> Optional[int]:
        '''Seeded output of the dummy checkpoint matches a stored slice.'''
        _UpperCAmelCase : List[str] = PriorTransformer.from_pretrained('''hf-internal-testing/prior-dummy''' )
        _UpperCAmelCase : Optional[int] = model.to(UpperCAmelCase_ )
        if hasattr(UpperCAmelCase_ , '''set_default_attn_processor''' ):
            model.set_default_attn_processor()
        _UpperCAmelCase : Tuple = self.get_dummy_seed_input()
        with torch.no_grad():
            _UpperCAmelCase : List[str] = model(**UpperCAmelCase_ )[0]
        _UpperCAmelCase : List[str] = output[0, :5].flatten().cpu()
        print(UpperCAmelCase_ )
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        _UpperCAmelCase : List[str] = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
        self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1E-2 ) )
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration tests for the Kandinsky 2.1 prior checkpoint.

    NOTE(review): same obfuscation caveats as the class above -- every method
    is named `a_` and assignment targets do not match the names read.
    Code is documented as written.
    """

    def a_ ( self : List[Any] , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : str=768 , UpperCAmelCase_ : List[Any]=77 , UpperCAmelCase_ : List[str]=0 ) -> List[Any]:
        '''Seeded random inputs sized for the full kandinsky prior model.'''
        torch.manual_seed(UpperCAmelCase_ )
        _UpperCAmelCase : Dict = batch_size
        _UpperCAmelCase : str = embedding_dim
        _UpperCAmelCase : Union[str, Any] = num_embeddings
        _UpperCAmelCase : int = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
        _UpperCAmelCase : List[Any] = torch.randn((batch_size, embedding_dim) ).to(UpperCAmelCase_ )
        _UpperCAmelCase : Union[str, Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(UpperCAmelCase_ )
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def a_ ( self : List[str] ) -> Union[str, Any]:
        '''Release GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
            [37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
            # fmt: on
        ] )
    def a_ ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ) -> Any:
        '''Seeded outputs of the kandinsky prior match stored slices.'''
        _UpperCAmelCase : List[Any] = PriorTransformer.from_pretrained('''kandinsky-community/kandinsky-2-1-prior''' , subfolder='''prior''' )
        model.to(UpperCAmelCase_ )
        _UpperCAmelCase : Optional[int] = self.get_dummy_seed_input(seed=UpperCAmelCase_ )
        with torch.no_grad():
            _UpperCAmelCase : Dict = model(**UpperCAmelCase_ )[0]
        assert list(sample.shape ) == [1, 768]
        _UpperCAmelCase : int = sample[0, :8].flatten().cpu()
        print(UpperCAmelCase_ )
        _UpperCAmelCase : Optional[int] = torch.tensor(UpperCAmelCase_ )
        assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 )
| 416 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line into a fixed-length encoding.

    NOTE(review): the original definition repeated one obfuscated parameter
    name for every argument (a SyntaxError) while the body read `line`; the
    name and signature are reconstructed from the call site in the dataset
    class below (`encode_line(tokenizer, text, max_length, "right")`) --
    confirm against the upstream source.

    Args:
        tokenizer: a tokenizer callable (e.g. a `PreTrainedTokenizer`).
        line (str): raw text to encode.
        max_length (int): maximum sequence length.
        padding_side (str): "left" or "right"; assigned onto the tokenizer.
        pad_to_max_length (bool): pad to `max_length` when True.
        return_tensors (str): tensor format, e.g. "pt".
    """
    # BART's byte-level BPE needs add_prefix_space for lines that do not
    # already start with whitespace.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop trailing/interior columns of a batch that contain only padding.

    NOTE(review): the original definition repeated one obfuscated parameter
    name (a SyntaxError) and collided with the function above; parameter
    names are reconstructed from the reads in the body (`input_ids`,
    `attention_mask`).

    Args:
        input_ids: LongTensor of shape (batch, seq_len).
        pad_token_id: token id used for padding.
        attention_mask: optional tensor of the same shape as `input_ids`.

    Returns:
        The column-trimmed `input_ids`, or an `(input_ids, attention_mask)`
        tuple when a mask is supplied.
    """
    # Keep a column iff at least one row holds a non-pad token there.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class snake_case__(_UpperCamelCase ):
    """Lazy line-by-line seq2seq dataset.

    Reads parallel ``<type_path>.source`` / ``<type_path>.target`` files via
    ``linecache`` and tokenizes each pair on ``__getitem__``.

    NOTE(review): this class is machine-mangled — ``__init__`` and the collate
    method repeat the parameter name ``SCREAMING_SNAKE_CASE`` (a SyntaxError
    as written), every attribute assignment targets the throwaway local
    ``lowercase__`` instead of ``self.<attr>``, and the bodies read names
    (``type_path``, ``tokenizer``, ``n_obs``, ``index`` …) that are no longer
    bound. Restore the upstream names before using this class.
    """

    def __init__( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str="train" , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : List[Any]="" , ):
        # Expected upstream signature: (tokenizer, data_dir, max_source_length,
        # max_target_length, type_path="train", n_obs=None, src_lang=None,
        # tgt_lang=None, prefix="") — TODO confirm against utils_rag.py.
        super().__init__()
        lowercase__ : List[str] = Path(SCREAMING_SNAKE_CASE ).joinpath(type_path + ".source" )
        lowercase__ : Optional[int] = Path(SCREAMING_SNAKE_CASE ).joinpath(type_path + ".target" )
        lowercase__ : int = self.get_char_lens(self.src_file )
        lowercase__ : Optional[Any] = max_source_length
        lowercase__ : List[str] = max_target_length
        # Empty source lines would produce empty training examples.
        assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}"""
        lowercase__ : List[str] = tokenizer
        lowercase__ : Optional[int] = prefix
        if n_obs is not None:
            # Optionally truncate the dataset to the first n_obs examples.
            lowercase__ : str = self.src_lens[:n_obs]
        lowercase__ : List[Any] = src_lang
        lowercase__ : Optional[int] = tgt_lang

    def __len__( self : Tuple ):
        # One example per source line.
        return len(self.src_lens )

    def __getitem__( self : int , SCREAMING_SNAKE_CASE : Optional[Any] ):
        lowercase__ : Union[str, Any] = index + 1 # linecache starts at 1
        lowercase__ : List[Any] = self.prefix + linecache.getline(str(self.src_file ) , SCREAMING_SNAKE_CASE ).rstrip("\n" )
        lowercase__ : int = linecache.getline(str(self.tgt_file ) , SCREAMING_SNAKE_CASE ).rstrip("\n" )
        assert source_line, f"""empty source line for index {index}"""
        assert tgt_line, f"""empty tgt line for index {index}"""
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , SCREAMING_SNAKE_CASE ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG tokenizers wrap a question-encoder + generator pair; plain
        # tokenizers are used directly for both sides.
        lowercase__ : Union[str, Any] = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , SCREAMING_SNAKE_CASE ) else self.tokenizer
        )
        lowercase__ : List[str] = self.tokenizer.generator if isinstance(self.tokenizer , SCREAMING_SNAKE_CASE ) else self.tokenizer
        lowercase__ : List[str] = encode_line(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.max_source_length , "right" )
        lowercase__ : Optional[int] = encode_line(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.max_target_length , "right" )
        lowercase__ : str = source_inputs["input_ids"].squeeze()
        lowercase__ : Any = target_inputs["input_ids"].squeeze()
        lowercase__ : Any = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def snake_case ( SCREAMING_SNAKE_CASE : Optional[int] ):
        # Per-line character lengths of the file at the given path.
        # NOTE(review): ``len(SCREAMING_SNAKE_CASE )`` measures the path, not
        # each line — the upstream code computed ``len(x)``.
        return [len(SCREAMING_SNAKE_CASE ) for x in Path(SCREAMING_SNAKE_CASE ).open().readlines()]

    def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] ):
        # Collate a list of examples into padded batch tensors, trimming
        # all-pad columns with trim_batch.
        lowercase__ : Tuple = torch.stack([x["input_ids"] for x in batch] )
        lowercase__ : Any = torch.stack([x["attention_mask"] for x in batch] )
        lowercase__ : Tuple = torch.stack([x["decoder_input_ids"] for x in batch] )
        lowercase__ : List[str] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , SCREAMING_SNAKE_CASE )
            else self.tokenizer.pad_token_id
        )
        lowercase__ : str = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , SCREAMING_SNAKE_CASE )
            else self.tokenizer.pad_token_id
        )
        lowercase__ : Tuple = trim_batch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        lowercase__ , lowercase__ : int = trim_batch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
        lowercase__ : Optional[Any] = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
# Module-level logger for the helpers below.
# NOTE(review): `set_extra_model_params` reads a name `logger`, but this
# mangled module binds the logger to `lowerCAmelCase__` — restore the name.
lowerCAmelCase__ = getLogger(__name__)
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return list(itertools.chain.from_iterable(lowerCamelCase__ ) )
def __lowerCamelCase ( lowerCamelCase__ ):
    """Write the current git repository metadata to ``<folder>/git_log.json``."""
    save_json(get_git_info(), os.path.join(lowerCamelCase__, "git_log.json"))
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=4 , **lowerCamelCase__ ):
"""simple docstring"""
with open(lowerCamelCase__ , "w" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=lowerCamelCase__ , **lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
with open(lowerCamelCase__ ) as f:
return json.load(lowerCamelCase__ )
def __lowerCamelCase():
    """Collect git metadata (repo id, HEAD sha, branch) plus the hostname.

    The mangled original passed the unbound name `lowerCamelCase__` both as
    `search_parent_directories` and as the repo id; restored to the upstream
    values (`True` and `repo`).
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return list(map(lowerCamelCase__ , lowerCamelCase__ ) )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
with open(lowerCamelCase__ , "wb" ) as f:
return pickle.dump(lowerCamelCase__ , lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def remove_articles(lowerCamelCase__ ):
return re.sub(R"\b(a|an|the)\b" , " " , lowerCamelCase__ )
def white_space_fix(lowerCamelCase__ ):
return " ".join(text.split() )
def remove_punc(lowerCamelCase__ ):
lowercase__ : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCamelCase__ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) )
def __lowerCamelCase(prediction, ground_truth):
    """SQuAD-style token-level F1 between a prediction and a reference string.

    Returns 0 when the normalized strings share no tokens. The mangled
    original repeated one parameter name (a SyntaxError); distinct names
    restored.

    NOTE(review): relies on `normalize_answer`, which this mangled module only
    defines under the name `__lowerCamelCase` — restore the helper's name.
    """
    pred_tokens = normalize_answer(prediction).split()
    gold_tokens = normalize_answer(ground_truth).split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens)
    recall = 1.0 * num_same / len(gold_tokens)
    return (2 * precision * recall) / (precision + recall)
def __lowerCamelCase(prediction, ground_truth):
    """True iff the normalized prediction equals the normalized reference.

    The mangled original repeated one parameter name (a SyntaxError); distinct
    names restored. NOTE(review): `normalize_answer` is only defined in this
    mangled module under the name `__lowerCamelCase` — restore the helper.
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def __lowerCamelCase(output_lns, reference_lns):
    """Average exact-match over parallel lists of hypothesis/reference strings.

    Returns ``{"em": score}``; an empty input yields 0. The mangled original
    repeated one parameter name (a SyntaxError); distinct names restored.
    NOTE(review): depends on `exact_match_score`, which this mangled module
    only defines under the name `__lowerCamelCase`.
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return model_prefix.startswith("rag" )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Any = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
lowercase__ : List[Any] = "dropout_rate"
for p in extra_params:
if getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and not hasattr(lowerCamelCase__ , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(lowerCamelCase__ ) )
delattr(lowerCamelCase__ , lowerCamelCase__ )
continue
lowercase__ : Any = p if hasattr(lowerCamelCase__ , lowerCamelCase__ ) else equivalent_param[p]
setattr(lowerCamelCase__ , lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) )
delattr(lowerCamelCase__ , lowerCamelCase__ )
return hparams, config
| 496 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
# Module-level logger for the training script.
# NOTE(review): the training function below reads `logger`, but this mangled
# module binds the logger to `lowerCAmelCase__` — restore the original name.
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class snake_case__:
    """Arguments controlling data preprocessing for XNLI fine-tuning.

    NOTE(review): machine-renaming left every field bound to the same name
    ``lowercase_`` with no type annotation, so ``@dataclass`` ignores all of
    them (each assignment merely overwrites the previous class attribute),
    and ``default=_UpperCamelCase`` references a name this module never
    defines. The upstream fields appear to be: max_seq_length,
    overwrite_cache, pad_to_max_length, max_train_samples, max_eval_samples,
    max_predict_samples — restore names, annotations and defaults before use.
    """

    # max_seq_length
    lowercase_ = field(
        default=1_2_8 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    # overwrite_cache
    lowercase_ = field(
        default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
    # pad_to_max_length
    lowercase_ = field(
        default=_UpperCamelCase , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )
    # max_train_samples
    lowercase_ = field(
        default=_UpperCamelCase , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        } , )
    # max_eval_samples
    lowercase_ = field(
        default=_UpperCamelCase , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        } , )
    # max_predict_samples
    lowercase_ = field(
        default=_UpperCamelCase , metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of prediction examples to this """
                """value if set."""
            )
        } , )
@dataclass
class snake_case__:
    """Arguments selecting the pretrained model/config/tokenizer for XNLI.

    NOTE(review): same mangling as the data arguments above — every field is
    bound to ``lowercase_`` without an annotation (so ``@dataclass`` ignores
    them) and ``default=_UpperCamelCase`` is an unbound name. Upstream fields
    appear to be: model_name_or_path, language, train_language, config_name,
    tokenizer_name, cache_dir, do_lower_case, use_fast_tokenizer,
    model_revision, use_auth_token, ignore_mismatched_sizes.
    """

    # model_name_or_path
    lowercase_ = field(
        default=_UpperCamelCase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    # language
    lowercase_ = field(
        default=_UpperCamelCase , metadata={"""help""": """Evaluation language. Also train language if `train_language` is set to None."""} )
    # train_language
    lowercase_ = field(
        default=_UpperCamelCase , metadata={"""help""": """Train language if it is different from the evaluation language."""} )
    # config_name
    lowercase_ = field(
        default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    # tokenizer_name
    lowercase_ = field(
        default=_UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    # cache_dir
    lowercase_ = field(
        default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    # do_lower_case
    lowercase_ = field(
        default=_UpperCamelCase , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , )
    # use_fast_tokenizer
    lowercase_ = field(
        default=_UpperCamelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    # model_revision
    lowercase_ = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    # use_auth_token
    lowercase_ = field(
        default=_UpperCamelCase , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )
    # ignore_mismatched_sizes
    lowercase_ = field(
        default=_UpperCamelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def __lowerCamelCase ( ):
    """XNLI fine-tuning entry point: parse args, load datasets/model/tokenizer,
    then run train / eval / predict as requested.

    NOTE(review): this function is machine-mangled and cannot run as written —
    every local is bound to the throwaway name ``lowercase__`` while later
    lines read the original names (``parser``, ``training_args``,
    ``train_dataset`` …), several metric assignments were destroyed entirely
    (the ``min(...)`` lines below), ``training_args.fpaa`` looks like a
    mangling of ``fp16``, and ``logger`` is unbound (the module bound it to
    ``lowerCAmelCase__``). Restore the upstream run_xnli.py names before use.
    """
    lowercase__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    lowercase__ , lowercase__ , lowercase__ : str = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    # NOTE(review): `lowerCamelCase__` below is unbound — upstream passed the parsed args.
    send_example_telemetry("run_xnli" , lowerCamelCase__ )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    lowercase__ : int = training_args.get_process_log_level()
    logger.setLevel(lowerCamelCase__ )
    datasets.utils.logging.set_verbosity(lowerCamelCase__ )
    transformers.utils.logging.set_verbosity(lowerCamelCase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )
    # Detecting last checkpoint.
    lowercase__ : List[str] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowercase__ : Optional[Any] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            lowercase__ : Any = load_dataset(
                "xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            lowercase__ : List[str] = load_dataset(
                "xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        lowercase__ : Union[str, Any] = train_dataset.features["label"].names
    if training_args.do_eval:
        lowercase__ : Dict = load_dataset(
            "xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        lowercase__ : str = eval_dataset.features["label"].names
    if training_args.do_predict:
        lowercase__ : int = load_dataset(
            "xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        lowercase__ : Tuple = predict_dataset.features["label"].names
    # Labels
    lowercase__ : List[str] = len(lowerCamelCase__ )
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowercase__ : Union[str, Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , idalabel={str(lowerCamelCase__ ): label for i, label in enumerate(lowerCamelCase__ )} , labelaid={label: i for i, label in enumerate(lowerCamelCase__ )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    lowercase__ : List[str] = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        lowercase__ : Union[str, Any] = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        lowercase__ : Tuple = False

    def preprocess_function(lowerCamelCase__ ):
        # Tokenize the texts
        return tokenizer(
            examples["premise"] , examples["hypothesis"] , padding=lowerCamelCase__ , max_length=data_args.max_seq_length , truncation=lowerCamelCase__ , )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            lowercase__ : List[Any] = min(len(lowerCamelCase__ ) , data_args.max_train_samples )
            lowercase__ : int = train_dataset.select(range(lowerCamelCase__ ) )
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            lowercase__ : Union[str, Any] = train_dataset.map(
                lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(lowerCamelCase__ ) ) , 3 ):
            logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            lowercase__ : List[Any] = min(len(lowerCamelCase__ ) , data_args.max_eval_samples )
            lowercase__ : Optional[Any] = eval_dataset.select(range(lowerCamelCase__ ) )
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            lowercase__ : Optional[int] = eval_dataset.map(
                lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            lowercase__ : Tuple = min(len(lowerCamelCase__ ) , data_args.max_predict_samples )
            lowercase__ : Tuple = predict_dataset.select(range(lowerCamelCase__ ) )
        with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
            lowercase__ : Tuple = predict_dataset.map(
                lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
    # Get the metric function
    lowercase__ : Optional[int] = evaluate.load("xnli" )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(lowerCamelCase__ ):
        lowercase__ : Tuple = p.predictions[0] if isinstance(p.predictions , lowerCamelCase__ ) else p.predictions
        lowercase__ : int = np.argmax(lowerCamelCase__ , axis=1 )
        return metric.compute(predictions=lowerCamelCase__ , references=p.label_ids )

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        lowercase__ : int = default_data_collator
    elif training_args.fpaa:
        # fp16 needs inputs padded to a multiple of 8 for tensor cores.
        lowercase__ : Tuple = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 )
    else:
        lowercase__ : Union[str, Any] = None
    # Initialize our Trainer
    lowercase__ : Tuple = Trainer(
        model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , )
    # Training
    if training_args.do_train:
        lowercase__ : Tuple = None
        if training_args.resume_from_checkpoint is not None:
            lowercase__ : int = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowercase__ : Union[str, Any] = last_checkpoint
        lowercase__ : str = trainer.train(resume_from_checkpoint=lowerCamelCase__ )
        lowercase__ : str = train_result.metrics
        lowercase__ : List[Any] = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ )
        )
        # NOTE(review): upstream stored this into metrics["train_samples"];
        # the assignment target was destroyed by the renaming.
        lowercase__ : List[Any] = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
        trainer.save_model() # Saves the tokenizer too for easy upload
        trainer.log_metrics("train" , lowerCamelCase__ )
        trainer.save_metrics("train" , lowerCamelCase__ )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        lowercase__ : List[str] = trainer.evaluate(eval_dataset=lowerCamelCase__ )
        lowercase__ : Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ )
        # NOTE(review): upstream stored this into metrics["eval_samples"].
        lowercase__ : Optional[int] = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
        trainer.log_metrics("eval" , lowerCamelCase__ )
        trainer.save_metrics("eval" , lowerCamelCase__ )
    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = trainer.predict(lowerCamelCase__ , metric_key_prefix="predict" )
        lowercase__ : str = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCamelCase__ )
        )
        # NOTE(review): upstream stored this into metrics["predict_samples"].
        lowercase__ : Optional[Any] = min(lowerCamelCase__ , len(lowerCamelCase__ ) )
        trainer.log_metrics("predict" , lowerCamelCase__ )
        trainer.save_metrics("predict" , lowerCamelCase__ )
        lowercase__ : str = np.argmax(lowerCamelCase__ , axis=1 )
        lowercase__ : Any = os.path.join(training_args.output_dir , "predictions.txt" )
        if trainer.is_world_process_zero():
            with open(lowerCamelCase__ , "w" ) as writer:
                writer.write("index\tprediction\n" )
                for index, item in enumerate(lowerCamelCase__ ):
                    lowercase__ : Optional[int] = label_list[item]
                    writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
    # The training entry point above is defined as `__lowerCamelCase`; the
    # original guard called a non-existent `main()` and raised NameError.
    __lowerCamelCase()
| 496 | 1 |
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : list[str] ):
__lowercase = ''''''
for word_or_phrase in separated:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise Exception('''join() accepts only strings to be joined''' )
joined += word_or_phrase + separator
return joined.strip(lowerCamelCase_ )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 703 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# Slack API client authenticated with the CI bot token from the environment.
# NOTE(review): the Message class below calls `client.chat_postMessage`, but
# this mangled module binds the client to `_SCREAMING_SNAKE_CASE` — restore.
_SCREAMING_SNAKE_CASE = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _lowerCAmelCase ( lowerCamelCase_ : Any ):
__lowercase = test_results.split(''' ''' )
__lowercase = 0
__lowercase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
__lowercase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCamelCase_ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _lowerCAmelCase ( lowerCamelCase_ : Union[str, Any] ):
__lowercase = {}
__lowercase = None
__lowercase = False
for line in failures_short_lines.split('''\n''' ):
if re.search(r'''_ \[doctest\]''' , lowerCamelCase_ ):
__lowercase = True
__lowercase = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
__lowercase = line
__lowercase = False
return failures
class __lowercase :
    """Builds and posts the Slack report for the nightly doc-test run.

    NOTE(review): this class is machine-mangled and cannot run as written —
    ``__init__`` repeats the parameter name ``_lowerCamelCase`` (a SyntaxError),
    all instance attributes are assigned to the throwaway local ``__lowercase``,
    most methods share the name ``_UpperCAmelCase`` (each definition overwrites
    the previous one on the class), and bodies read unbound names (``title``,
    ``client``, ``doc_test_results`` …). Restore the upstream names before use.
    """

    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ) -> Any:
        """Capture the report title and the aggregated doc-test results."""
        __lowercase = title
        __lowercase = doc_test_results['''time_spent'''].split(''',''' )[0]
        __lowercase = doc_test_results['''success''']
        __lowercase = doc_test_results['''failures''']
        __lowercase = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        __lowercase = doc_test_results

    @property
    def _UpperCAmelCase (self ) -> str:
        """Total wall time of all jobs formatted as ``XhYmZs``."""
        __lowercase = [self._time_spent]
        __lowercase = 0
        for time in time_spent:
            __lowercase = time.split(''':''' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(_lowerCamelCase ) == 1:
                __lowercase = [0, 0, time_parts[0]]
            __lowercase , __lowercase , __lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        __lowercase , __lowercase , __lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(_lowerCamelCase )}h{int(_lowerCamelCase )}m{int(_lowerCamelCase )}s"

    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack header block carrying the report title."""
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack section block for a fully green run, linking the Action run."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack section block summarizing the failure count and run time."""
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def _UpperCAmelCase (self ) -> Dict:
        """Slack section block listing the failed tests grouped by category."""
        __lowercase = 40
        # NOTE(review): reads the module-level `doc_test_results` rather than
        # the instance copy — confirm against the upstream notification script.
        __lowercase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_lowerCamelCase ,_lowerCamelCase )}
        __lowercase = ''''''
        for category, failures in category_failures.items():
            if len(_lowerCamelCase ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(_lowerCamelCase )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def _UpperCAmelCase (self ) -> str:
        """Full Slack payload (JSON string) assembled from the blocks above."""
        __lowercase = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(_lowerCamelCase )

    @staticmethod
    def _UpperCAmelCase () -> List[str]:
        """Post a generic "tests could not run" message to the daily channel."""
        __lowercase = [
            {
                '''type''': '''section''',
                '''text''': {
                    '''type''': '''plain_text''',
                    '''text''': '''There was an issue running the tests.''',
                },
                '''accessory''': {
                    '''type''': '''button''',
                    '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
                    '''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(_lowerCamelCase )} ) )
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text='''There was an issue running the tests.''' ,blocks=_lowerCamelCase ,)

    def _UpperCAmelCase (self ) -> Tuple:
        """Post the main report message and remember its thread timestamp."""
        print('''Sending the following payload''' )
        print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
        __lowercase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.'''
        __lowercase = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,blocks=self.payload ,text=_lowerCamelCase ,)

    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
        """Build the Slack blocks for a per-job reply listing its failures."""
        __lowercase = ''''''
        for key, value in failures.items():
            # Slack messages have a size cap; truncate long failure texts.
            __lowercase = value[:200] + ''' [Truncated]''' if len(_lowerCamelCase ) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        __lowercase = job_name
        __lowercase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
        if job_link is not None:
            __lowercase = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def _UpperCAmelCase (self ) -> Any:
        """Post one threaded reply per job that reported failures."""
        if self.thread_ts is None:
            raise ValueError('''Can only post reply if a post has been made.''' )
        __lowercase = self.doc_test_results.pop('''job_link''' )
        self.doc_test_results.pop('''failures''' )
        self.doc_test_results.pop('''success''' )
        self.doc_test_results.pop('''time_spent''' )
        __lowercase = sorted(self.doc_test_results.items() ,key=lambda _lowerCamelCase : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['''failures'''] ):
                __lowercase = f"*Num failures* :{len(job_result['failed'] )} \n"
                __lowercase = job_result['''failures''']
                __lowercase = self.get_reply_blocks(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,text=_lowerCamelCase )
                print('''Sending the following reply''' )
                print(json.dumps({'''blocks''': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] ,text=f"Results for {job}" ,blocks=_lowerCamelCase ,thread_ts=self.thread_ts['''ts'''] ,)
                # Avoid hitting Slack's rate limit when posting many replies.
                time.sleep(1 )
def _lowerCAmelCase():
    """Fetch ``{job name: job URL}`` for every job of the current GitHub
    Actions run, paging through the API 100 jobs at a time.

    Returns ``{}`` if anything goes wrong (missing keys, network errors).
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The first request already covered 100 jobs; fetch the remaining pages.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            # NOTE(review): the mangled original discarded each page's payload
            # and kept re-reading the first page's `result`; re-bind per page.
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def _lowerCAmelCase ( lowerCamelCase_ : str ):
__lowercase = {}
if os.path.exists(lowerCamelCase_ ):
__lowercase = os.listdir(lowerCamelCase_ )
for file in files:
try:
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , encoding='''utf-8''' ) as f:
__lowercase = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(lowerCamelCase_ , lowerCamelCase_ )}." ) from e
return _artifact
def _lowerCAmelCase ( ):
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
__lowercase = name
__lowercase = []
def __str__(self ) -> List[str]:
'''simple docstring'''
return self.name
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
__lowercase = {}
__lowercase = filter(os.path.isdir , os.listdir() )
for directory in directories:
__lowercase = directory
if artifact_name not in _available_artifacts:
__lowercase = Artifact(lowerCamelCase_ )
_available_artifacts[artifact_name].add_path(lowerCamelCase_ )
return _available_artifacts
if __name__ == "__main__":
    # NOTE(review): throughout this script every assignment target is the same
    # collapsed name (_SCREAMING_SNAKE_CASE) while later lines read distinct
    # names (github_actions_job_links, available_artifacts, docs, artifact,
    # doc_test_results, ...). As written most of those reads are unresolved —
    # this looks like mechanical renaming damage; TODO confirm against the
    # original notification script before relying on this block.
    _SCREAMING_SNAKE_CASE = get_job_links()  # job name -> GitHub job URL
    _SCREAMING_SNAKE_CASE = retrieve_available_artifacts()  # artifact name -> Artifact

    # Glob pattern -> human-readable doc-test category.
    _SCREAMING_SNAKE_CASE = collections.OrderedDict(
        [
            ('''*.py''', '''API Examples'''),
            ('''*.md''', '''MD Examples'''),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    _SCREAMING_SNAKE_CASE = {
        v: {
            '''failed''': [],
            '''failures''': {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    _SCREAMING_SNAKE_CASE = github_actions_job_links.get('''run_doctests''')

    # Single report artifact produced by the GPU doc-test job.
    _SCREAMING_SNAKE_CASE = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
    _SCREAMING_SNAKE_CASE = retrieve_artifact(artifact_path['''name'''])
    if "stats" in artifact:
        # Overall counts plus wall-clock time parsed from the stats file.
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = handle_test_results(artifact['''stats'''])
        _SCREAMING_SNAKE_CASE = failed
        _SCREAMING_SNAKE_CASE = success
        # Trim surrounding brackets from the time string and add a separator.
        _SCREAMING_SNAKE_CASE = time_spent[1:-1] + ''', '''

        _SCREAMING_SNAKE_CASE = extract_first_line_failure(artifact['''failures_short'''])
        for line in artifact["summary_short"].split('''\n'''):
            if re.search('''FAILED''', line):
                # Normalise a "FAILED path::test" summary line into its parts.
                _SCREAMING_SNAKE_CASE = line.replace('''FAILED ''', '''''')
                _SCREAMING_SNAKE_CASE = line.split()[0].replace('''\n''', '''''')

                if "::" in line:
                    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line.split('''::''')
                else:
                    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        # First matching glob pattern decides the category.
                        _SCREAMING_SNAKE_CASE = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        _SCREAMING_SNAKE_CASE = all_failures[test] if test in all_failures else '''N/A'''
                        _SCREAMING_SNAKE_CASE = failure
                        break

    # Build and post the Slack report, then thread per-category details.
    _SCREAMING_SNAKE_CASE = Message('''🤗 Results of the doc tests.''', doc_test_results)
    message.post()
    message.post_reply()
| 56 | 0 |
"""Lazy import structure for the SwiftFormer model sub-package."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Names importable without any heavy backend.
# BUG FIX: the original bound this dict to a throwaway name while the
# _LazyModule call below read the undefined `_import_structure`.
_import_structure = {
    'configuration_swiftformer': [
        'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SwiftFormerConfig',
        'SwiftFormerOnnxConfig',
    ]
}

# Modelling code is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: the original assigned this list to a fresh variable instead of
    # registering it in the import structure, so the model classes were never
    # lazily importable.
    _import_structure['modeling_swiftformer'] = [
        'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwiftFormerForImageClassification',
        'SwiftFormerModel',
        'SwiftFormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    # ...while at runtime the module is replaced by a lazy proxy.
    # BUG FIX: the original assigned the _LazyModule to a plain variable and
    # never installed it, so lazy loading never took effect.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 372 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 20 | 0 |
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class A__(_snake_case):
    """BigBird question-answering module extended with a 5-way answer-category
    head for Natural Questions: returns (start_logits, end_logits, cls_out).
    """

    # NOTE(review): these three flax module fields were mechanically collapsed
    # in the original (`lowercase = 42` three times); the conventional fields
    # for this module are config / dtype / add_pooling_layer — TODO confirm.
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        """Create submodules. BUG FIX: flax only invokes a method named
        ``setup``; the original used a different name and also bound the Dense
        layer to a local instead of ``self.cls``."""
        super().setup()
        # Extra head: 5 Natural-Questions answer categories.
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        # BUG FIX: the original read `outputs` / `cls_out` without ever
        # binding them.
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])  # classify from the pooled output
        return outputs[:2] + (cls_out,)
class A__ ( _snake_case ):
    # Pretrained-model wrapper: points the base class at the Natural-Questions
    # module defined above.
    # NOTE(review): `FlaxBigBirdForNaturalQuestionsModule` is not defined under
    # that name in this file (the module class above is named `A__`) — this
    # reference looks like renaming damage; TODO confirm.
    lowercase = FlaxBigBirdForNaturalQuestionsModule
def UpperCAmelCase__(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    """Mean cross-entropy averaged over the start-token, end-token and
    answer-category heads.

    Each ``*_logits`` has shape (..., num_classes); each ``*_labels`` holds
    integer class ids. Returns a scalar: (start + end + pooled) / 3.

    BUG FIX: the original declared six parameters all with the same name (a
    SyntaxError) and computed all three loss terms from the same arguments.
    """

    def cross_entropy(logits, labels, reduction=None):
        """Per-example softmax cross-entropy; ``reduction`` optionally folds it."""
        num_classes = logits.shape[-1]
        # One-hot encode by comparing labels against the class-id range.
        one_hot = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        log_probs = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(one_hot * log_probs, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    # All three heads use mean reduction.
    cross_entropy = partial(cross_entropy, reduction=jnp.mean)

    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class A__ :
    """Hyper-parameters and paths for the BigBird Natural-Questions run.

    NOTE(review): every field below is bound to the same collapsed name
    ``lowercase`` so, as written, only the last assignment survives; the
    original field names (model_id, max_length, lr, ...) appear to have been
    destroyed by mechanical renaming — TODO restore them.
    """
    lowercase = "google/bigbird-roberta-base"
    lowercase = 3_000
    lowercase = 10_500

    lowercase = 128
    lowercase = 3
    lowercase = 1
    lowercase = 5

    # tx_args
    lowercase = 3e-5
    lowercase = 0.0
    lowercase = 20_000
    lowercase = 0.0095

    lowercase = "bigbird-roberta-natural-questions"
    lowercase = "training-expt"
    lowercase = "data/nq-training.jsonl"
    lowercase = "data/nq-validation.jsonl"

    def snake_case_ ( self ) -> Tuple:
        """Post-init-style hook: creates the experiment directory and derives
        the effective global batch size (per-device size * device count)."""
        os.makedirs(self.base_dir , exist_ok=UpperCamelCase__ )
        A_ = os.path.join(self.base_dir , self.save_dir )
        A_ = self.batch_size_per_device * jax.device_count()
@dataclass
class A__ :
    """Data collator: pads tokenised features to ``max_length`` and packs them
    into int32 jnp arrays ready for the pmapped train/val steps.

    NOTE(review): locals are collapsed onto ``A_`` throughout, so several
    reads below (``batch``, ``input_ids``/``attention_mask``) do not resolve
    as written — looks like mechanical renaming damage; TODO confirm.
    """
    lowercase = 42
    lowercase = 4_096  # no dynamic padding on TPUs

    def __call__( self , UpperCamelCase__ ) -> str:
        """Collate one raw feature batch and shard it across devices."""
        A_ = self.collate_fn(UpperCamelCase__ )
        A_ = jax.tree_util.tree_map(UpperCamelCase__ , UpperCamelCase__ )
        return batch

    def snake_case_ ( self , UpperCamelCase__ ) -> Tuple:
        """Build the model-input dict: padded ids/mask plus the three labels."""
        A_ , A_ = self.fetch_inputs(features['''input_ids'''] )
        A_ = {
            '''input_ids''': jnp.array(UpperCamelCase__ , dtype=jnp.intaa ),
            '''attention_mask''': jnp.array(UpperCamelCase__ , dtype=jnp.intaa ),
            '''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
            '''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
            '''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
        }
        return batch

    def snake_case_ ( self , UpperCamelCase__ ) -> int:
        """Pad every sequence in the batch; returns (ids, attention_masks)."""
        A_ = [self._fetch_inputs(UpperCamelCase__ ) for ids in input_ids]
        return zip(*UpperCamelCase__ )

    def snake_case_ ( self , UpperCamelCase__ ) -> Any:
        """Pad one sequence to max_length with pad_id / mask 0."""
        A_ = [1 for _ in range(len(UpperCamelCase__ ) )]
        while len(UpperCamelCase__ ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def UpperCAmelCase__(dataset, batch_size, seed=None):
    """Yield consecutive slices of ``batch_size`` items from *dataset* as dicts.

    A trailing partial batch is dropped. When ``seed`` is given the dataset is
    shuffled first (requires a ``.shuffle(seed=...)`` method, e.g. HF datasets).

    BUG FIX: the original declared all three parameters with the same name,
    which is a SyntaxError.
    """
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)

    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def UpperCAmelCase__(state, drp_rng, **model_inputs):
    """One pmapped optimisation step.

    Returns (new_state, metrics, new_drp_rng); loss and gradients are averaged
    across devices with pmean before the update.

    BUG FIX: the original declared duplicate parameter names (a SyntaxError)
    and collapsed all locals onto one name, leaving the body unresolvable.
    """

    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        # NOTE(review): train=True assumed for the training step (dropout on) —
        # the original flag was scrambled; confirm against the caller.
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=dropout_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    # Split the rng so every step sees fresh dropout noise.
    dropout_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    # Average across devices before applying the update.
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def UpperCAmelCase__(state, **model_inputs):
    """One pmapped evaluation step: returns {"loss": ...} averaged across devices.

    BUG FIX: the original collapsed all locals onto one name, so the unpacked
    logits and the loss were never actually bound.
    """
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    # NOTE(review): train=False assumed for evaluation (no dropout) — the
    # original flag was scrambled; confirm against the caller.
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class A__ ( train_state.TrainState ):
    # TrainState extended with the loss function carried as a non-pytree field
    # so it travels with the state through pmap without being traced.
    # NOTE(review): `pytree_node=_snake_case` — the conventional value here is
    # `pytree_node=False`; `_snake_case` looks like renaming damage. TODO confirm.
    lowercase = struct.field(pytree_node=_snake_case )
@dataclass
class A__ :
    """Trainer: builds/restores the replicated TrainState, runs the epoch loop,
    evaluates, and saves checkpoints.

    NOTE(review): all four methods below share the collapsed name
    ``snake_case_`` (so later defs shadow earlier ones) and every local is
    bound to ``A_`` while later lines read the real names (state, args, batch,
    metrics, ...) — mechanical renaming damage; TODO restore the original
    method/variable names before running.
    """
    lowercase = 42
    lowercase = 42
    lowercase = 42
    lowercase = 42
    lowercase = 42
    lowercase = 42
    lowercase = None

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> int:
        """Create (or restore from ``ckpt_dir``) the TrainState, build the
        optimiser, and replicate the state across devices."""
        A_ = model.params
        A_ = TrainState.create(
            apply_fn=model.__call__ , params=UpperCamelCase__ , tx=UpperCamelCase__ , loss_fn=UpperCamelCase__ , )
        if ckpt_dir is not None:
            # Resume: pull params/opt_state/step plus saved args and collator.
            A_ , A_ , A_ , A_ , A_ = restore_checkpoint(UpperCamelCase__ , UpperCamelCase__ )
            A_ = {
                '''lr''': args.lr,
                '''init_lr''': args.init_lr,
                '''warmup_steps''': args.warmup_steps,
                '''num_train_steps''': num_train_steps,
                '''weight_decay''': args.weight_decay,
            }
            A_ , A_ = build_tx(**UpperCamelCase__ )
            A_ = train_state.TrainState(
                step=UpperCamelCase__ , apply_fn=model.__call__ , params=UpperCamelCase__ , tx=UpperCamelCase__ , opt_state=UpperCamelCase__ , )
            A_ = args
            A_ = data_collator
            A_ = lr
            A_ = params
        A_ = jax_utils.replicate(UpperCamelCase__ )
        return state

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
        """Epoch loop: batches data, runs the pmapped train step, and logs /
        evaluates / checkpoints every ``logging_steps`` / ``save_steps``."""
        A_ = self.args
        A_ = len(UpperCamelCase__ ) // args.batch_size
        A_ = jax.random.PRNGKey(0 )
        A_ = jax.random.split(UpperCamelCase__ , jax.device_count() )
        for epoch in range(args.max_epochs ):
            A_ = jnp.array(0 , dtype=jnp.floataa )
            A_ = get_batched_dataset(UpperCamelCase__ , args.batch_size , seed=UpperCamelCase__ )
            A_ = 0
            for batch in tqdm(UpperCamelCase__ , total=UpperCamelCase__ , desc=f'''Running EPOCH-{epoch}''' ):
                A_ = self.data_collator(UpperCamelCase__ )
                A_ , A_ , A_ = self.train_step_fn(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
                running_loss += jax_utils.unreplicate(metrics['''loss'''] )
                i += 1
                if i % args.logging_steps == 0:
                    # Pull scalars back from the replicated state for logging.
                    A_ = jax_utils.unreplicate(state.step )
                    A_ = running_loss.item() / i
                    A_ = self.scheduler_fn(state_step - 1 )

                    A_ = self.evaluate(UpperCamelCase__ , UpperCamelCase__ )
                    A_ = {
                        '''step''': state_step.item(),
                        '''eval_loss''': eval_loss.item(),
                        '''tr_loss''': tr_loss,
                        '''lr''': lr.item(),
                    }
                    tqdm.write(str(UpperCamelCase__ ) )
                    self.logger.log(UpperCamelCase__ , commit=UpperCamelCase__ )

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=UpperCamelCase__ )

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
        """Run the pmapped validation step over the dataset; returns mean loss."""
        A_ = get_batched_dataset(UpperCamelCase__ , self.args.batch_size )
        A_ = len(UpperCamelCase__ ) // self.args.batch_size
        A_ = jnp.array(0 , dtype=jnp.floataa )
        A_ = 0
        for batch in tqdm(UpperCamelCase__ , total=UpperCamelCase__ , desc="""Evaluating ... """ ):
            A_ = self.data_collator(UpperCamelCase__ )
            A_ = self.val_step_fn(UpperCamelCase__ , **UpperCamelCase__ )
            running_loss += jax_utils.unreplicate(metrics['''loss'''] )
            i += 1
        return running_loss / i

    def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
        """Save model params, optimiser state, args, collator and the current
        step into ``save_dir`` (un-replicating the state first)."""
        A_ = jax_utils.unreplicate(UpperCamelCase__ )
        print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... """ )
        self.model_save_fn(UpperCamelCase__ , params=state.params )
        with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """wb""" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(UpperCamelCase__ , """args.joblib""" ) )
        joblib.dump(self.data_collator , os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) )
        with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """w""" ) as f:
            json.dump({"""step""": state.step.item()} , UpperCamelCase__ )
        print("""DONE""" )
def UpperCAmelCase__(save_dir, state):
    """Restore a checkpoint written by the trainer's save method.

    Returns (params, opt_state, step, args, data_collator) read from
    ``save_dir``.

    BUG FIX: the original declared both parameters with the same name (a
    SyntaxError) and collapsed every local onto one name.
    """
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def UpperCAmelCase__(lr, init_lr, warmup_steps, num_train_steps):
    """Build a learning-rate schedule: linear warmup from ``init_lr`` to ``lr``
    over ``warmup_steps``, then linear decay down to ~0 (1e-7) for the rest.

    BUG FIX: the original declared duplicate parameter names (a SyntaxError).
    """
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    # Switch from warmup to decay exactly at the warmup boundary.
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def UpperCAmelCase__(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimiser with a warmup+decay schedule.

    Weight decay is masked off for biases and LayerNorm scale parameters.
    Returns (tx, lr_schedule).

    BUG FIX: the original declared duplicate parameter names (a SyntaxError)
    and the mask tested the *array values* (``v[-1] != "bias"``) instead of
    the flattened key path, which is where parameter names live.
    """

    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # Flattened keys are tuples of path components; exclude biases and
        # LayerNorm scales from weight decay.
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 713 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def UpperCAmelCase__(tmpdir):
    """While one FileLock holds the lock, a second acquire with a short
    timeout must raise Timeout, and must have waited at least that long.

    BUG FIX: the original collapsed both locks and the timeout onto one name
    and passed its own parameter (the tmpdir) to ``pytest.raises`` instead of
    the ``Timeout`` exception class.
    """
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01

    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        # The failed acquire must have blocked for at least the timeout.
        assert time.time() - _start > timeout
def UpperCAmelCase__(tmpdir):
    """Over-long lock file names must be shortened so the on-disk file name
    stays within the common 255-character limit, while still ending in .lock.

    BUG FIX: the original collapsed both locks onto one name and passed its
    own parameter to ``pytest.raises`` instead of the ``Timeout`` class.
    """
    filename = "a" * 10_00 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)  # the long name was shortened
    assert len(os.path.basename(lock1._lock_file)) <= 2_55

    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        # A zero-timeout acquire on the held (shortened) lock must fail.
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 667 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
# NOTE(review): the logger and the checkpoint map below are bound to the SAME
# name (_UpperCAmelCase), so the second assignment clobbers the first —
# renaming damage; TODO restore distinct names (logger / config archive map).
_UpperCAmelCase : Tuple = logging.get_logger(__name__)

# Pretrained checkpoint id -> config URL.
_UpperCAmelCase : Any = {
    '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowercase_(_UpperCamelCase):
    """Configuration for the LeViT model: stores the architecture
    hyper-parameters (patch embedding, per-stage sizes, attention shape and
    the down-sampling ops between stages).
    """

    __lowerCAmelCase = "levit"  # model_type identifier

    def __init__(
        self,
        image_size=2_24,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[1_28, 2_56, 3_84],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # BUG FIX: the original bound every hyper-parameter to one throwaway
        # local (`_A = ...`), so the config instance carried no state at all.
        # NOTE(review): parameter names reconstructed from the assignment
        # order — the originals were mechanically collapsed; confirm against
        # the canonical LevitConfig.
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Down-sampling ("Subsample") ops inserted between the three stages.
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class lowercase_(_UpperCamelCase):
    """ONNX export configuration for LeViT."""

    __lowerCAmelCase = version.parse("1.11")

    # BUG FIX: the original defined BOTH properties below under the same name,
    # so the second silently replaced the first and the inputs spec was
    # unreachable. Restored to the conventional OnnxConfig property names
    # (`inputs`, `atol_for_validation`) — NOTE(review): confirm against the
    # base OnnxConfig API.
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Model input spec: pixel_values with named dynamic axes."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 107 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class _snake_case :
'''simple docstring'''
def __init__( self : Dict , snake_case : int , snake_case : MutableSequence[float] ):
if len(snake_case ) != degree + 1:
raise ValueError(
'''The number of coefficients should be equal to the degree + 1.''' )
UpperCAmelCase_ :list[float] = list(snake_case )
UpperCAmelCase_ :str = degree
def __add__( self : Any , snake_case : Polynomial ):
if self.degree > polynomial_a.degree:
UpperCAmelCase_ :int = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , snake_case )
else:
UpperCAmelCase_ :Any = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , snake_case )
def __sub__( self : List[str] , snake_case : Polynomial ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Union[str, Any] ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : Any , snake_case : Polynomial ):
UpperCAmelCase_ :list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , snake_case )
def snake_case_ ( self : Optional[Any] , snake_case : int | float ):
UpperCAmelCase_ :int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : List[Any] ):
UpperCAmelCase_ :List[str] = ''''''
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(snake_case )
return polynomial
def __repr__( self : int ):
return self.__str__()
def snake_case_ ( self : str ):
UpperCAmelCase_ :list[float] = [0] * self.degree
for i in range(self.degree ):
UpperCAmelCase_ :str = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , snake_case )
def snake_case_ ( self : Optional[int] , snake_case : int | float = 0 ):
UpperCAmelCase_ :list[float] = [0] * (self.degree + 2)
UpperCAmelCase_ :List[str] = constant
for i in range(self.degree + 1 ):
UpperCAmelCase_ :Optional[int] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , snake_case )
def __eq__( self : int , snake_case : object ):
if not isinstance(snake_case , snake_case ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : Optional[Any] , snake_case : object ):
return not self.__eq__(snake_case )
| 608 | 0 |
from collections import namedtuple

import requests
from lxml import html  # type: ignore

# Result record for the three scraped counters.
# BUG FIX: the original bound the namedtuple to a name that was never read,
# while the function below referenced `covid_data`.
covid_data = namedtuple("covid_data", "cases deaths recovered")


def UpperCamelCase(_a="https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape total cases / deaths / recoveries from worldometers.

    The three "maincounter" <span> texts are extracted with a fixed XPath.
    """
    xpath_str = '''//div[@class = "maincounter-number"]/span/text()'''
    # BUG FIX: the original passed the URL (its own argument) to .xpath(...)
    # instead of the XPath expression.
    return covid_data(*html.fromstring(requests.get(_a).content).xpath(xpath_str))


fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
# BUG FIX: the original called the undefined name `covid_stats`.
print(fmt.format(*UpperCamelCase()))
| 441 |
import math

# 70 balls total: 10 balls of each of 7 colours.
# BUG FIX: the original bound all three constants to one collapsed name while
# the formula below read BALLS_PER_COLOUR / NUM_COLOURS / NUM_BALLS.
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def UpperCamelCase(_a=2_0) -> str:
    """Project Euler 493: expected number of distinct colours when drawing
    ``_a`` balls from the urn, formatted to 9 decimal places.

    By linearity of expectation:
        E[colours] = NUM_COLOURS * (1 - C(60, n) / C(70, n))
    """
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, _a)
    total = math.comb(NUM_BALLS, _a)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    # BUG FIX: the original printed via the undefined name `solution`.
    print(UpperCamelCase(20))
| 441 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class __a :
    """Test helper: builds a miniature MBartConfig plus random encoder/decoder
    inputs for the TF MBart unit tests.

    NOTE(review): ``__init__`` declares every parameter under the same
    collapsed name (a SyntaxError) and locals are bound to ``UpperCamelCase``
    while later lines read the real names (input_ids, outputs,
    past_key_values, ...) — mechanical renaming damage; TODO restore.
    """
    UpperCamelCase_ : Tuple = MBartConfig
    UpperCamelCase_ : Any = {}
    UpperCamelCase_ : Optional[int] = '''gelu'''

    def __init__( self : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=13 , UpperCAmelCase_ : List[Any]=7 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=99 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : List[Any]=37 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : int=20 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : List[Any]=0 , )-> Dict:
        """Store the miniature model hyper-parameters used by every test."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = seq_length
        UpperCamelCase = is_training
        UpperCamelCase = use_labels
        UpperCamelCase = vocab_size
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = eos_token_id
        UpperCamelCase = pad_token_id
        UpperCamelCase = bos_token_id

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] )-> Tuple:
        """Build (config, inputs_dict) with random ids ending in EOS."""
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        # Append EOS to every sequence so generation tests terminate.
        UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        UpperCamelCase = prepare_mbart_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
        return config, inputs_dict

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] )-> str:
        """Start a decoder forward pass with use_cache to obtain past key
        values (the full comparison against a cached second pass is not
        present in this excerpt)."""
        UpperCamelCase = TFMBartModel(config=UpperCAmelCase_ ).get_decoder()
        UpperCamelCase = inputs_dict["input_ids"]

        # Reduce to a single example to keep the cache comparison cheap.
        UpperCamelCase = input_ids[:1, :]
        UpperCamelCase = inputs_dict["attention_mask"][:1, :]
        UpperCamelCase = inputs_dict["head_mask"]
        UpperCamelCase = 1

        # first forward pass
        UpperCamelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )

        UpperCamelCase , UpperCamelCase = outputs.to_tuple()
        UpperCamelCase = past_key_values[1]
def lowerCamelCase__(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the full kwargs dict for a TFMBart forward pass, deriving any
    mask that was not supplied.

    BUG FIX: the original declared several parameters under one collapsed
    name (a SyntaxError) and referenced the non-existent dtype ``tf.inta``
    (NOTE(review): restored as ``tf.int8`` per the upstream test — confirm).
    """
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # Always attend to the first decoder token, then mask pads.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common-model and pipeline test suite wired to the TF MBart classes.

    NOTE(review): locals below are bound to the collapsed name
    ``UpperCamelCase`` and the config-tester is constructed with an undefined
    argument name — renaming damage; TODO restore.
    """
    UpperCamelCase_ : Optional[Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    UpperCamelCase_ : Optional[int] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    UpperCamelCase_ : Any = (
        {
            '''conversational''': TFMBartForConditionalGeneration,
            '''feature-extraction''': TFMBartModel,
            '''summarization''': TFMBartForConditionalGeneration,
            '''text2text-generation''': TFMBartForConditionalGeneration,
            '''translation''': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ : Optional[Any] = True
    UpperCamelCase_ : str = False
    UpperCamelCase_ : int = False

    def _SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] )-> Dict:
        """Skip every pipeline test except feature extraction for this model."""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def _SCREAMING_SNAKE_CASE ( self : int )-> Optional[int]:
        """Create the model tester and the shared config tester."""
        UpperCamelCase = TFMBartModelTester(self )
        UpperCamelCase = ConfigTester(self , config_class=UpperCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Union[str, Any]:
        """Run the generic config round-trip tests."""
        self.config_tester.run_common_tests()

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> List[Any]:
        """Exercise the decoder past-key-values path via the model tester."""
        UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class __a ( unittest.TestCase ):
    """Slow integration test: EN->RO translation with facebook/mbart-large-en-ro
    must exactly reproduce the reference sentence.

    NOTE(review): several locals are bound to the collapsed ``UpperCamelCase``
    name while later lines read real names, and some keyword values are the
    collapsed ``UpperCAmelCase_`` placeholder — renaming damage; TODO restore.
    """
    UpperCamelCase_ : Union[str, Any] = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
    ]
    UpperCamelCase_ : Tuple = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
    ]
    UpperCamelCase_ : Optional[Any] = '''facebook/mbart-large-en-ro'''

    @cached_property
    def _SCREAMING_SNAKE_CASE ( self : str )-> Any:
        """Lazily load (and cache) the tokenizer for the checkpoint."""
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def _SCREAMING_SNAKE_CASE ( self : Tuple )-> Tuple:
        """Lazily load (and cache) the seq2seq model for the checkpoint."""
        UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **UpperCAmelCase_ : Optional[int] )-> Optional[int]:
        """Translate the source text and compare against the expected output."""
        UpperCamelCase = self.translate_src_text(**UpperCAmelCase_ )
        self.assertListEqual(self.expected_text , UpperCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : List[str] , **UpperCAmelCase_ : int )-> Dict:
        """Tokenise, beam-search generate (num_beams=2) and decode."""
        UpperCamelCase = self.tokenizer(self.src_text , **UpperCAmelCase_ , return_tensors="tf" )
        UpperCamelCase = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
        UpperCamelCase = self.tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
        return generated_words

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> str:
        """End-to-end batch generation check (network + weights; marked slow)."""
        self._assert_generated_batch_equal_expected()
| 554 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE = """MobileNetV1Config"""
# Base docstring
SCREAMING_SNAKE_CASE = """google/mobilenet_v1_1.0_224"""
SCREAMING_SNAKE_CASE = [1, 1_024, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE = """google/mobilenet_v1_1.0_224"""
SCREAMING_SNAKE_CASE = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None )-> int:
"""simple docstring"""
UpperCamelCase = {}
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = model.mobilenet_va
else:
UpperCamelCase = model
UpperCamelCase = "MobilenetV1/Conv2d_0/"
UpperCamelCase = backbone.conv_stem.convolution.weight
UpperCamelCase = backbone.conv_stem.normalization.bias
UpperCamelCase = backbone.conv_stem.normalization.weight
UpperCamelCase = backbone.conv_stem.normalization.running_mean
UpperCamelCase = backbone.conv_stem.normalization.running_var
for i in range(13 ):
UpperCamelCase = i + 1
UpperCamelCase = i * 2
UpperCamelCase = backbone.layer[pt_index]
UpperCamelCase = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
UpperCamelCase = pointer.convolution.weight
UpperCamelCase = pointer.normalization.bias
UpperCamelCase = pointer.normalization.weight
UpperCamelCase = pointer.normalization.running_mean
UpperCamelCase = pointer.normalization.running_var
UpperCamelCase = backbone.layer[pt_index + 1]
UpperCamelCase = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
UpperCamelCase = pointer.convolution.weight
UpperCamelCase = pointer.normalization.bias
UpperCamelCase = pointer.normalization.weight
UpperCamelCase = pointer.normalization.running_mean
UpperCamelCase = pointer.normalization.running_var
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = "MobilenetV1/Logits/Conv2d_1c_1x1/"
UpperCamelCase = model.classifier.weight
UpperCamelCase = model.classifier.bias
return tf_to_pt_map
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[str]:
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
UpperCamelCase = tf.train.list_variables(UpperCAmelCase_ )
UpperCamelCase = {}
for name, shape in init_vars:
logger.info(F"Loading TF weight {name} with shape {shape}" )
UpperCamelCase = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = array
# Build TF to PyTorch weights loading map
UpperCamelCase = _build_tf_to_pytorch_map(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"Importing {name}" )
if name not in tf_weights:
logger.info(F"{name} not in tf pre-trained weights, skipping" )
continue
UpperCamelCase = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
UpperCamelCase = np.transpose(UpperCAmelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
UpperCamelCase = array.squeeze().transpose()
else:
UpperCamelCase = np.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(F"Initialize PyTorch weight {name} {array.shape}" )
UpperCamelCase = torch.from_numpy(UpperCAmelCase_ )
tf_weights.pop(UpperCAmelCase_ , UpperCAmelCase_ )
tf_weights.pop(name + "/RMSProp" , UpperCAmelCase_ )
tf_weights.pop(name + "/RMSProp_1" , UpperCAmelCase_ )
tf_weights.pop(name + "/ExponentialMovingAverage" , UpperCAmelCase_ )
logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ )-> torch.Tensor:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = features.shape[-2:]
UpperCamelCase , UpperCamelCase = conv_layer.stride
UpperCamelCase , UpperCamelCase = conv_layer.kernel_size
if in_height % stride_height == 0:
UpperCamelCase = max(kernel_height - stride_height , 0 )
else:
UpperCamelCase = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
UpperCamelCase = max(kernel_width - stride_width , 0 )
else:
UpperCamelCase = max(kernel_width - (in_width % stride_width) , 0 )
UpperCamelCase = pad_along_width // 2
UpperCamelCase = pad_along_width - pad_left
UpperCamelCase = pad_along_height // 2
UpperCamelCase = pad_along_height - pad_top
UpperCamelCase = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(UpperCAmelCase_ , UpperCAmelCase_ , "constant" , 0.0 )
class __a ( nn.Module ):
def __init__( self : List[Any] , UpperCAmelCase_ : MobileNetVaConfig , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[bool or str] = True , )-> None:
"""simple docstring"""
super().__init__()
UpperCamelCase = config
if in_channels % groups != 0:
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." )
if out_channels % groups != 0:
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." )
UpperCamelCase = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
UpperCamelCase = nn.Convad(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=UpperCAmelCase_ , groups=UpperCAmelCase_ , bias=UpperCAmelCase_ , padding_mode="zeros" , )
if use_normalization:
UpperCamelCase = nn.BatchNormad(
num_features=UpperCAmelCase_ , eps=config.layer_norm_eps , momentum=0.9997 , affine=UpperCAmelCase_ , track_running_stats=UpperCAmelCase_ , )
else:
UpperCamelCase = None
if use_activation:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCAmelCase_ ):
UpperCamelCase = ACTaFN[config.hidden_act]
else:
UpperCamelCase = config.hidden_act
else:
UpperCamelCase = None
def _SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase_ : torch.Tensor )-> torch.Tensor:
"""simple docstring"""
if self.config.tf_padding:
UpperCamelCase = apply_tf_padding(UpperCAmelCase_ , self.convolution )
UpperCamelCase = self.convolution(UpperCAmelCase_ )
if self.normalization is not None:
UpperCamelCase = self.normalization(UpperCAmelCase_ )
if self.activation is not None:
UpperCamelCase = self.activation(UpperCAmelCase_ )
return features
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : List[str] = MobileNetVaConfig
UpperCamelCase_ : Dict = load_tf_weights_in_mobilenet_va
UpperCamelCase_ : List[str] = '''mobilenet_v1'''
UpperCamelCase_ : Optional[int] = '''pixel_values'''
UpperCamelCase_ : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase_ : Union[nn.Linear, nn.Convad] )-> None:
"""simple docstring"""
if isinstance(UpperCAmelCase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , _lowerCAmelCase , )
class __a ( _lowerCAmelCase ):
def __init__( self : str , UpperCAmelCase_ : MobileNetVaConfig , UpperCAmelCase_ : bool = True )-> Optional[int]:
"""simple docstring"""
super().__init__(UpperCAmelCase_ )
UpperCamelCase = config
UpperCamelCase = 32
UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
UpperCamelCase = MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=config.num_channels , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=2 , )
UpperCamelCase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
UpperCamelCase = nn.ModuleList()
for i in range(13 ):
UpperCamelCase = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=1 , ) )
UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase_ : Dict )-> List[str]:
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , )-> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
"""simple docstring"""
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
UpperCamelCase = self.conv_stem(UpperCAmelCase_ )
UpperCamelCase = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
UpperCamelCase = layer_module(UpperCAmelCase_ )
if output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = hidden_states
if self.pooler is not None:
UpperCamelCase = torch.flatten(self.pooler(UpperCAmelCase_ ) , start_dim=1 )
else:
UpperCamelCase = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowerCAmelCase , )
class __a ( _lowerCAmelCase ):
def __init__( self : str , UpperCAmelCase_ : MobileNetVaConfig )-> None:
"""simple docstring"""
super().__init__(UpperCAmelCase_ )
UpperCamelCase = config.num_labels
UpperCamelCase = MobileNetVaModel(UpperCAmelCase_ )
UpperCamelCase = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
UpperCamelCase = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase_ )
UpperCamelCase = nn.Linear(UpperCAmelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , )-> Union[tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.mobilenet_va(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier(self.dropout(UpperCAmelCase_ ) )
UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase = "single_label_classification"
else:
UpperCamelCase = "multi_label_classification"
if self.config.problem_type == "regression":
UpperCamelCase = MSELoss()
if self.num_labels == 1:
UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCamelCase = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase = BCEWithLogitsLoss()
UpperCamelCase = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states , )
| 554 | 1 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__magic_name__ = float('''nan''')
class Tee:
    """Duplicate everything written to stdout into a log file.

    tqdm progress-bar redraws (anything up to a carriage return) are stripped
    from the file copy so the log stays readable.

    NOTE(review): in the mangled original the constructor assigned to throwaway
    locals, so `self.stdout` / `self.file` — read by `__getattr__` and the
    write method — were never set, and the class name did not match the
    `Tee(report_fn)` call site; both fixed.
    """

    def __init__( self , lowerCamelCase ):
        # Remember the real stdout and open the log file in append mode.
        self.stdout = sys.stdout
        self.file = open(lowerCamelCase , "a" )

    def __getattr__( self , lowerCamelCase ):
        # Delegate every other attribute (flush, isatty, ...) to the real stdout.
        return getattr(self.stdout , lowerCamelCase )

    def A_ ( self , lowerCamelCase ):
        """Write *lowerCamelCase* to stdout and a tqdm-free copy to the file."""
        self.stdout.write(lowerCamelCase )
        # strip tqdm codes: drop everything up to the last carriage return per line
        self.file.write(re.sub(r"^.*\r" , "" , lowerCamelCase , 0 , re.M ) )
def get_original_command(max_width=80, full_python_path=False):
    """Reconstruct the command line this script was launched with.

    The result is wrapped to `max_width` columns using shell line
    continuations (`\\` + newline) so it can be pasted into a terminal.

    NOTE(review): the mangled original had two identical parameter names
    (a SyntaxError) and never bound the locals it later used (`cmd`,
    `val`, `lines`, `current_line`); restored to the names the body
    references. `process_results` calls this as `get_original_command`.

    Args:
        max_width: maximum width of a single wrapped line.
        full_python_path: if True use the full interpreter path,
            otherwise just its basename.

    Returns:
        A single multi-line shell-escaped string.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    """Normalize `args.base_cmd` and return it as an argv list.

    Unwraps multi-line input, replaces any `--output_dir` with ours, and
    ensures `--overwrite_output_dir` is present exactly once.

    NOTE(review): the mangled original discarded every `re.sub` result
    instead of rebinding `args.base_cmd`, and used non-raw regex strings;
    both fixed. `main` calls this as `get_base_command`.

    Args:
        args: parsed CLI namespace; `args.base_cmd` is mutated in place.
        output_dir: directory to inject as `--output_dir`.

    Returns:
        `[sys.executable] + shlex.split(args.base_cmd)`.
    """
    # unwrap multi-line input (backslash-newline continuations)
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run one benchmark subprocess and return the wanted metrics.

    Saves the run's stdout/stderr under `output_dir`, then reads
    `output_dir/all_results.json` and keeps only `metric_keys`.
    On a non-zero exit, returns `{target_metric_key: nan}`.

    NOTE(review): the mangled original declared seven identical parameter
    names (a SyntaxError) and never bound `result`/`prefix`/`metrics`;
    restored per the names the body references. `process_run` calls this
    as `process_run_single`.
    """
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation `repeat_times` times and return averaged metrics.

    Prints a per-variation progress line (✓ for a successful run, ✘ for a
    failed one). Returns a dict of metric means plus the variation string
    under `variation_key`; if every repeat failed, the target metric is nan.

    NOTE(review): the mangled original declared ten identical parameter
    names (a SyntaxError) and never bound `metrics`/`results`/`outcome`/
    `results_str`; restored per the names the body references. `main`
    calls this as `process_run`.
    """
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K erases the tqdm leftovers on the current console line
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            # show the individual repeat results alongside the mean
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    """Return a report of the software versions and GPU hardware in use.

    NOTE(review): the mangled original never bound `properties`, which the
    f-string below reads; fixed. Requires a CUDA device to be present.
    `process_results` calls this as `get_versions`.
    """
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Build and print the benchmark comparison tables (github + console).

    Computes a `diff_%` column relative to `base_variation` (or, as a
    fallback, the minimal target value), then prints the table twice:
    once in github-markdown-friendly form, once in console form.

    NOTE(review): the mangled original declared five identical parameter
    names (a SyntaxError), discarded every intermediate DataFrame, and
    mangled the lambda parameters; restored per the names the body
    references. `main` calls this as `process_results`. `output_dir` is
    accepted but unused, kept for call-site compatibility.
    """
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    """Parse CLI args, run every benchmark variation and print the report.

    NOTE(review): the mangled original lost all local bindings, the
    argparse defaults/types, and the `sys.stdout = Tee(...)` redirection;
    restored per the names the body references. The `__main__` guard
    calls this as `main`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd", default=None, type=str, required=True, help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")
    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
# Script entry point.
# NOTE(review): `main` is not defined under that name in the mangled file
# (the def was renamed to a placeholder) — confirm the definition's name.
if __name__ == "__main__":
    main()
| 530 |
import argparse
import struct
import unittest
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase ):
snake_case__ = data
# Initialize hash values
snake_case__ = [
0X6A_09E_667,
0XBB_67A_E85,
0X3C_6EF_372,
0XA5_4FF_53A,
0X51_0E5_27F,
0X9B_056_88C,
0X1F_83D_9AB,
0X5B_E0C_D19,
]
# Initialize round constants
snake_case__ = [
0X42_8A2_F98,
0X71_374_491,
0XB5_C0F_BCF,
0XE9_B5D_BA5,
0X39_56C_25B,
0X59_F11_1F1,
0X92_3F8_2A4,
0XAB_1C5_ED5,
0XD8_07A_A98,
0X12_835_B01,
0X24_318_5BE,
0X55_0C7_DC3,
0X72_BE5_D74,
0X80_DEB_1FE,
0X9B_DC0_6A7,
0XC1_9BF_174,
0XE4_9B6_9C1,
0XEF_BE4_786,
0X0F_C19_DC6,
0X24_0CA_1CC,
0X2D_E92_C6F,
0X4A_748_4AA,
0X5C_B0A_9DC,
0X76_F98_8DA,
0X98_3E5_152,
0XA8_31C_66D,
0XB0_032_7C8,
0XBF_597_FC7,
0XC6_E00_BF3,
0XD5_A79_147,
0X06_CA6_351,
0X14_292_967,
0X27_B70_A85,
0X2E_1B2_138,
0X4D_2C6_DFC,
0X53_380_D13,
0X65_0A7_354,
0X76_6A0_ABB,
0X81_C2C_92E,
0X92_722_C85,
0XA2_BFE_8A1,
0XA8_1A6_64B,
0XC2_4B8_B70,
0XC7_6C5_1A3,
0XD1_92E_819,
0XD6_990_624,
0XF4_0E3_585,
0X10_6AA_070,
0X19_A4C_116,
0X1E_376_C08,
0X27_487_74C,
0X34_B0B_CB5,
0X39_1C0_CB3,
0X4E_D8A_A4A,
0X5B_9CC_A4F,
0X68_2E6_FF3,
0X74_8F8_2EE,
0X78_A56_36F,
0X84_C87_814,
0X8C_C70_208,
0X90_BEF_FFA,
0XA4_506_CEB,
0XBE_F9A_3F7,
0XC6_717_8F2,
]
snake_case__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def A_ ( lowerCamelCase ):
snake_case__ = B"\x80" + (B"\x00" * (63 - (len(lowerCamelCase ) + 8) % 64))
snake_case__ = struct.pack(">Q" , (len(lowerCamelCase ) * 8) )
return data + padding + big_endian_integer
def A_ ( self ):
# Convert into blocks of 64 bytes
snake_case__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
snake_case__ = list(struct.unpack(">16L" , lowerCamelCase ) )
# add 48 0-ed integers
words += [0] * 48
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
snake_case__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
snake_case__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
snake_case__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100_000_000
# Compression
snake_case__ = self.ror(lowerCamelCase , 6 ) ^ self.ror(lowerCamelCase , 11 ) ^ self.ror(lowerCamelCase , 25 )
snake_case__ = (e & f) ^ ((~e & 0XFF_FFF_FFF) & g)
snake_case__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100_000_000
snake_case__ = self.ror(lowerCamelCase , 2 ) ^ self.ror(lowerCamelCase , 13 ) ^ self.ror(lowerCamelCase , 22 )
snake_case__ = (a & b) ^ (a & c) ^ (b & c)
snake_case__ = (sa + maj) % 0X100_000_000
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = (
g,
f,
e,
((d + tempa) % 0X100_000_000),
c,
b,
a,
((tempa + tempa) % 0X100_000_000),
)
snake_case__ = [a, b, c, d, e, f, g, h]
# Modify final values
snake_case__ = [
((element + mutated_hash_values[index]) % 0X100_000_000)
for index, element in enumerate(self.hashes )
]
snake_case__ = "".join([hex(lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def A_ ( self , lowerCamelCase , lowerCamelCase ):
return 0XFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    # NOTE(review): this test case re-uses the hasher class's name (shadowing it
    # at module level) and references ``SHAaaa``, ``hashlib.shaaaa`` and
    # ``lowerCamelCase``, none of which are defined — artifacts of automated
    # renaming. TODO restore the intended names (the hasher class and
    # ``hashlib.sha256``) before running.
    def A_ ( self ):
        # Intended as a cross-check of the local implementation against hashlib.
        import hashlib

        snake_case__ = bytes("Test String" , "utf-8" )
        self.assertEqual(SHAaaa(lowerCamelCase ).hash , hashlib.shaaaa(lowerCamelCase ).hexdigest() )
def SCREAMING_SNAKE_CASE__():
    """Run the module doctests, then hash a CLI-supplied string or file.

    The mangled original collapsed ``parser``, ``args`` and the hash input into
    one repeatedly re-bound local, leaving later references undefined.
    """
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")
    # NOTE(review): ``SHAaaa`` is not defined in this file (the hasher class is
    # ``_SCREAMING_SNAKE_CASE``, whose name is also shadowed by the test case
    # above), so this raises NameError as written — restore the intended class
    # name before use.
    print(SHAaaa(hash_input).hash)
if __name__ == "__main__":
    # The module's CLI driver is ``SCREAMING_SNAKE_CASE__`` (defined above);
    # the original called an undefined ``main()`` here, raising NameError.
    SCREAMING_SNAKE_CASE__()
| 530 | 1 |
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Pretrained-config archive map for Mask2Former checkpoints.
UpperCAmelCase__ : Optional[Any] = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
# Module logger. The class below calls ``logger.info``/``logger.warning_once``,
# so this must be bound to the name ``logger``; the original re-bound the
# archive-map name instead, leaving ``logger`` undefined.
logger = logging.get_logger(__name__)
class A(PretrainedConfig):
    r"""Configuration class for a Mask2Former model.

    Holds the backbone configuration plus transformer-decoder, loss-weight and
    initialization hyperparameters. The mangled original inherited from an
    undefined name, never stored its parameters on ``self`` (every assignment
    re-bound one local), and gave both helper methods the same name so one
    clobbered the other — all reconstructed here.
    """

    model_type = "mask2former"
    # Backbone families this config knows how to validate against.
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            # Fall back to a default Swin backbone when none is given.
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            # A plain dict is promoted to the concrete config class it names.
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # Mirror of decoder_layers kept for generic depth queries — presumably
        # ``num_hidden_layers``; TODO confirm against the intended config.
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate this config from an existing backbone config."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 48 |
from math import factorial
class _UpperCAmelCase :
def __init__( self , a__ , a__ ):
A_ : Optional[int] = real
if isinstance(a__ , a__ ):
A_ : str = [1] * rank
else:
A_ : str = rank
def __repr__( self ):
return (
F"""{self.real}+"""
F"""{'+'.join(str(a__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def _lowerCamelCase ( self ):
A_ : List[str] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , a__ )
def __add__( self , a__ ):
if not isinstance(a__ , a__ ):
return Dual(self.real + other , self.duals )
A_ : List[Any] = self.duals.copy()
A_ : Tuple = other.duals.copy()
if len(a__ ) > len(a__ ):
o_dual.extend([1] * (len(a__ ) - len(a__ )) )
elif len(a__ ) < len(a__ ):
s_dual.extend([1] * (len(a__ ) - len(a__ )) )
A_ : str = []
for i in range(len(a__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , a__ )
a = __add__
def __sub__( self , a__ ):
return self + other * -1
def __mul__( self , a__ ):
if not isinstance(a__ , a__ ):
A_ : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , a__ )
A_ : Tuple = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , a__ )
a = __mul__
def __truediv__( self , a__ ):
if not isinstance(a__ , a__ ):
A_ : List[str] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , a__ )
raise ValueError
def __floordiv__( self , a__ ):
if not isinstance(a__ , a__ ):
A_ : Optional[int] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , a__ )
raise ValueError
def __pow__( self , a__ ):
if n < 0 or isinstance(a__ , a__ ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
A_ : str = self
for _ in range(n - 1 ):
x *= self
return x
def _lowerCAmelCase(func, position, order):
    """Return the ``order``-th derivative of ``func`` at ``position``.

    Evaluates ``func`` on a dual number seeded at ``position`` and reads the
    derivative off the dual coefficients (scaled by ``order!``). ``order == 0``
    returns the plain function value. The mangled original used one duplicated
    name for all three parameters (a SyntaxError), passed broken arguments to
    ``isinstance``, and constructed an undefined ``Dual`` class.
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    seeded = _UpperCAmelCase(position, 1)
    result = func(seeded)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


# Backwards-compatible public alias: the module's driver and the error
# messages above refer to this function as ``differentiate``.
differentiate = _lowerCAmelCase
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        """Sample function for the demo: y**2 * y**4 == y**6."""
        return y**2 * y**4

    # Second derivative of y**6 at y = 9. The original defined ``f`` under a
    # mangled name and called an undefined ``differentiate``.
    print(differentiate(f, 9, 2))
| 569 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module logger (bound under a mangled name; nothing in the visible file reads it).
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class a(BaseImageProcessor):
    r"""Image processor: shortest-edge resize, center crop, rescale and normalize.

    The mangled original inherited from an undefined name, gave all five
    methods the same identifier (so only the last survived — even though
    ``preprocess`` calls ``self.resize``/``center_crop``/``rescale``/
    ``normalize``), and discarded every configuration value into one throwaway
    local instead of storing it on ``self``. Reconstructed here.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # ``size`` is a shortest-edge spec; ``crop_size`` a fixed H x W crop.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image,
        size,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ):
        """Resize so the image's shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to ``size["height"]`` x ``size["width"]``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Channel-wise normalize with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transforms and return a ``BatchFeature``.

        Every argument defaults to the value configured on the instance.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 716 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **metrics_kwargs):
    """Compute ROUGE between a predictions file and a references file.

    Both files hold one example per line; references are truncated to the
    number of predictions. When ``save_path`` is given the metrics dict is also
    written to disk as JSON. The mangled original declared every parameter
    under the same name (a SyntaxError) and was defined under a name the
    ``fire`` entry point below never referenced.
    """
    with open(pred_path) as pred_file:
        output_lns = [x.strip() for x in pred_file.readlines()]
    with open(tgt_path) as tgt_file:
        reference_lns = [x.strip() for x in tgt_file.readlines()][: len(output_lns)]
    metrics = calculate_rouge(output_lns, reference_lns, **metrics_kwargs)
    if save_path is not None:
        # NOTE(review): ``indent=None`` presumed from the mangled argument —
        # confirm against the intended ``save_json`` call.
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
    # CLI entry point: ``fire`` exposes calculate_rouge_path's parameters as
    # command-line flags.
    fire.Fire(calculate_rouge_path)
| 467 | 0 |
import argparse
from collections import defaultdict
import yaml
# Path to the English docs table of contents that the checks below read/write.
snake_case__ : Optional[Any] = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Deduplicate and alphabetize one section of the docs table of contents.

    "Overview" entries are pulled to the front; entries sharing a ``local``
    key must all carry the same title and are collapsed to a single entry;
    everything else is sorted case-insensitively by title. (The mangled
    original named all three module functions identically — only the last
    survived — even though call sites reference ``clean_doc_toc``.)
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.')
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})
    # Add non-duplicate keys (the original guarded on ``"local" not in counts``,
    # which could never protect docs lacking a ``local`` key).
    new_doc.extend([doc for doc in doc_list if 'local' not in doc or counts[doc['local']] == 1])
    new_doc = sorted(new_doc, key=lambda entry: entry["title"].lower())
    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)
    return overview_doc
def check_scheduler_doc(overwrite=False):
    """Verify — or, with ``overwrite=True``, rewrite — the Schedulers ToC section."""
    with open(snake_case__, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(snake_case__, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.')
def check_pipeline_doc(overwrite=False):
    """Verify — or, with ``overwrite=True``, rewrite — the Pipelines ToC section,
    including each pipeline's nested sub-section."""
    with open(snake_case__, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(snake_case__, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.')
if __name__ == "__main__":
    # The mangled original bound the parser and the parsed args to clobbered
    # names, leaving ``parser``/``args`` references undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 23 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__(unittest.TestCase):
    # NOTE(review): a second test class below re-uses this class's (mangled)
    # name, shadowing it at module level — confirm the intended distinct names.
    def test_no_pytorch_weights_are_downloaded(self):
        """Downloading a Flax pipeline must not pull any PyTorch ``.bin`` weights."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], 'snapshots'))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin') for f in files)
@slow
@require_flax
class lowerCamelCase__(unittest.TestCase):
    """Slow end-to-end tests for FlaxStableDiffusionPipeline on TPU pods.

    The mangled original gave all six test methods the same name (so only the
    last survived) and collapsed the ``pipeline, params`` unpacking into a
    single re-bound local; both are repaired here. Method names are
    descriptive reconstructions — confirm against the intended names.
    """

    def test_stable_diffusion_tiny_pipeline(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe', safety_checker=None
        )
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='flax', safety_checker=None
        )
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None
        )
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        # Same as above but with the default safety checker enabled.
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16
        )
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            set_alpha_to_one=False,
            steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='bf16',
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params['scheduler'] = scheduler_state

        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='bf16',
            dtype=jnp.bfloat16,
            safety_checker=None,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4',
            revision='bf16',
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
| 690 | 0 |
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Logistic sigmoid of ``value``; with ``deriv=True``, the derivative
    expressed in terms of a sigmoid *output* (s * (1 - s))."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value (learning-rate-like scale for the single-weight network).
# The original bound this under a name the training loop never referenced.
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight toward ``expected`` (a 0-100 target) and return
    the final layer output scaled back to the 0-100 range.

    Both functions in this module were originally defined under the same name
    (so the sigmoid was clobbered), and the loop's output variable was never
    the one returned — repaired here.
    """
    # Random starting weight in [1, 199] (always odd); uses the global RNG.
    weight = float(2 * (random.randint(1, 100)) - 1)
    layer_1 = 0.0  # defined up front so zero propagations returns 0.0 instead of raising
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive driver: read the target value and iteration count, then run
    # the training loop defined above. The original bound both inputs to the
    # same clobbered name, so the final call's arguments were undefined.
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 676 |
from __future__ import annotations
class Node:
    """A binary-tree node holding ``data`` and optional left/right children.

    The class must be named ``Node``: the traversal helpers' annotations and
    the ``main`` driver below all reference it, while the mangled original
    carried a throwaway name and never stored its attributes.
    """

    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None
def display(tree: Node | None) -> None:
    """Print node values using an in-order (left, root, right) traversal."""
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)
def depth_of_tree(tree: Node | None) -> int:
    """Return the number of nodes on the longest root-to-leaf path (0 for empty)."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0
def is_full_binary_tree(tree: Node) -> bool:
    """Return True if every node has either zero or two children."""
    if not tree:
        # An empty tree is vacuously full.
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    """Build a small sample tree and exercise the helpers above.

    NOTE(review): the original assigned all nine nodes to one throwaway local;
    the tree shape below follows the canonical example this file derives from
    — confirm against the intended structure.
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)


if __name__ == "__main__":
    main()
| 676 | 1 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# NOTE(review): this assignment looks mangled — the string "3" is bound to an
# otherwise-unused name, and ``os`` is imported above but never used; this was
# presumably originally an ``os.environ[...] = "3"`` setting. TODO confirm.
__lowercase : Optional[int] = '''3'''
# Report interpreter and platform details.
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
# Torch details are optional: print None placeholders when torch is absent.
try:
    import torch

    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)
# Same best-effort reporting for transformers.
try:
    import transformers

    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 36 |
"""simple docstring"""
from math import factorial
def solution(__lowerCAmelCase: int = 100) -> int:
    """Project Euler 20: return the sum of the digits of n! (default n = 100).

    The mangled original summed the *parameter* once per digit instead of each
    digit, and was defined under a name the driver below never referenced.
    """
    return sum(int(digit) for digit in str(factorial(__lowerCAmelCase)))
if __name__ == "__main__":
    # Prompt for n and print the digit sum of n!.
    print(solution(int(input("Enter the Number: ").strip())))
| 680 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
# NOTE(review): this dict re-binds the same (mangled) name as the logger above,
# clobbering it; nothing in the visible file reads either binding. Presumably
# the dict was a distinct archive-map constant — TODO confirm.
lowerCAmelCase : int ={
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class __snake_case ( __a ):
'''simple docstring'''
_snake_case = 'luke'
def __init__( self : str , _UpperCamelCase : Tuple=5_0267 , _UpperCamelCase : Any=50_0000 , _UpperCamelCase : Union[str, Any]=768 , _UpperCamelCase : int=256 , _UpperCamelCase : Optional[Any]=12 , _UpperCamelCase : List[str]=12 , _UpperCamelCase : List[str]=3072 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : int=512 , _UpperCamelCase : Dict=2 , _UpperCamelCase : Optional[int]=0.0_2 , _UpperCamelCase : Optional[int]=1E-1_2 , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[str]=None , _UpperCamelCase : List[Any]=1 , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : Any=2 , **_UpperCamelCase : Optional[int] , ) ->List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__)
_lowerCamelCase : Optional[Any] = vocab_size
_lowerCamelCase : Optional[int] = entity_vocab_size
_lowerCamelCase : str = hidden_size
_lowerCamelCase : List[str] = entity_emb_size
_lowerCamelCase : Optional[int] = num_hidden_layers
_lowerCamelCase : Any = num_attention_heads
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : Tuple = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : Any = type_vocab_size
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Any = layer_norm_eps
_lowerCamelCase : List[str] = use_entity_aware_attention
_lowerCamelCase : int = classifier_dropout
| 721 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case(unittest.TestCase):
    """Tests for MgpstrProcessor (char tokenizer + ViT image processor).

    NOTE(review): the previous (obfuscated) version named every method
    `_SCREAMING_SNAKE_CASE`, so later definitions silently overrode earlier
    ones, and referenced the undefined name `_UpperCamelCase` throughout.
    Method names below are restored from what the bodies do.
    """

    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        # NOTE(review): no `image_processor_tester` attribute is ever set in this
        # class, so accessing this property raises AttributeError — confirm
        # against the upstream test suite before relying on it.
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        """Write a character vocab and an image-processor config to a temp dir."""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Load the MGP-STR character tokenizer written by setUp."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """Load the ViT image processor written by setUp."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a single random PIL image (converted from a CHW uint8 array)."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        return Image.fromarray(np.moveaxis(image_input, 0, -1))

    def test_save_load_pretrained_default(self):
        """Round-trip save/load preserves tokenizer vocab and processor config."""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs at load time override saved tokenizer/processor settings."""
        processor = MgpstrProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        """Processor(images=...) matches calling the image processor directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """Processor(text=...) matches calling the tokenizer directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        """Text + image inputs yield pixel_values and labels; no input raises."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        """char_decode equals batch_decode with spaces stripped out."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        """Keys of a text-less call match the processor's declared input names."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        """batch_decode over char/bpe/wp logit tensors returns the expected keys."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
| 15 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCamelCase_(vectors, noofclusters):
    """K-Means clustering implemented with TensorFlow 1.x graph mode.

    :param vectors: n x k 2-D array-like — n vectors of dimensionality k
    :param noofclusters: number of clusters (must be < n)
    :return: (centroids, assignments) as evaluated NumPy values

    NOTE(review): the previous (obfuscated) version declared both parameters
    with the same name (a SyntaxError) and renamed every local to `a_` while
    references kept the original names; names are restored from those
    references.  `tf.sub` / `tf.initialize_all_variables` are very old TF-1.x
    APIs (modern equivalents: tf.subtract / tf.global_variables_initializer)
    — kept as-is to preserve behavior on the TF version this was written for.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Dimensionality of the input vectors.
    dim = len(vectors[0])

    # Shuffled indices used to pick random initial centroids.
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # A fresh graph per call keeps the default graph free of stale ops/Variables
    # from previous invocations.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        # One Variable per centroid, initialized to a random data point.
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        # Nodes to assign new values to the centroid Variables.
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        # Cluster assignment Variable for each vector (initialized to 0).
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        # Nodes to assign new values to the assignment Variables.
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        # Node computing the mean of a batch of vectors (for centroid updates).
        mean_input = tf.placeholder("float", [None, dim])
        mean_op = tf.reduce_mean(mean_input, 0)

        # Node computing the Euclidean distance between two vectors.
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        # Node picking the nearest centroid given a vector of distances.
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        # Must be created after all Variables so every one is initialized.
        init_op = tf.initialize_all_variables()
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Fixed iteration count instead of a convergence criterion, for simplicity.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP: assign each vector to its nearest centroid.
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Distances from this vector to every current centroid.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Index of the nearest centroid.
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Persist the assignment into its state Variable.
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP: move each centroid to the mean of its members.
            for cluster_n in range(noofclusters):
                # All vectors currently assigned to this cluster.
                assigned_vects = [
                    vectors[i] for i in range(len(vectors)) if sess.run(assignments[i]) == cluster_n
                ]
                # New centroid location = mean of assigned vectors.
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Evaluate and return final centroids and assignments.
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# No hosted config URLs for this model family in this file.
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class lowercase_(PretrainedConfig):
    """Configuration for a LLaMA model.

    NOTE(review): the previous (obfuscated) version declared every __init__
    parameter with the same name (a SyntaxError), inherited from the undefined
    `UpperCamelCase__`, and named the validation method `lowercase__` while
    calling `self._rope_scaling_validation()`; names are restored from the
    assignments in the body.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        """Construct the configuration; extra kwargs go to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility: grouped-query attention defaults to MHA
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` dict: {"type": linear|dynamic, "factor": float > 1}.

        Raises ValueError on any malformed configuration; a None value is allowed.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

# NOTE(review): every OrderedDict below was previously assigned to the same
# throwaway name `a_`, while the _LazyAutoMapping calls referenced the
# FLAX_..._NAMES constants — a NameError at import.  Constant and class names
# are restored from those references.

# model type -> Flax base model class name
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

# Lazy config-class -> model-class mappings built from the name tables above.
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 243 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
# Module-level logger (was assigned to the throwaway name `a_`).
logger = logging.getLogger(__name__)
class snake_case__(BaseTransformer):
    """PyTorch-Lightning module fine-tuning a seq2seq model for summarization.

    NOTE(review): the previous (obfuscated) version inherited from the undefined
    `lowerCAmelCase_`, clobbered all four class attributes into one name, and
    assigned every instance attribute to a throwaway local (so later methods
    reading e.g. `self.already_saved_batch` would fail).  Attribute names are
    restored from the references visible in the rest of the class.
    """

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        """Set up datasets, tokenizer-dependent options, and bookkeeping state.

        :param hparams: argparse-style namespace of training hyperparameters
        :raises NotImplementedError: dynamic batch size with multiple GPUs
        :raises ValueError: --sortish_sampler combined with --max_tokens_per_batch
        """
        if hparams.sortish_sampler and hparams.gpus > 1:
            # Sortish sampling is incompatible with DDP's default sampler.
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
            if hparams.sortish_sampler:
                raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, 'summarization')
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / 'metrics.json'
        self.hparams_save_path = Path(self.output_dir) / 'hparams.pkl'
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        # FSMT exposes a separate target-side vocab size.
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            'train': self.hparams.n_train,
            'val': self.hparams.n_val,
            'test': self.hparams.n_test,
        }
        # Negative counts mean "use the whole split".
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            'train': self.hparams.max_target_length,
            'val': self.hparams.val_max_target_length,
            'test': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
        assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        # Record the git commit for reproducibility — attribute target restored
        # per the upstream example script; TODO confirm.
        self.hparams.git_sha = get_git_info()['repo_sha']
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            # MBart needs the target-language code as the decoder start token.
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowercase_ ( self : Tuple, _snake_case : Dict[str, torch.Tensor] ) ->Dict[str, List[str]]:
snake_case__ : List[str] = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(_snake_case, Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir ) / 'tok_batch.json' )
snake_case__ : Optional[int] = True
return readable_batch
def lowercase_ ( self : List[Any], _snake_case : Dict, **_snake_case : str ) ->int:
return self.model(_snake_case, **_snake_case )
def lowercase_ ( self : str, _snake_case : List[int] ) ->List[str]:
snake_case__ : int = self.tokenizer.batch_decode(
_snake_case, skip_special_tokens=_snake_case, clean_up_tokenization_spaces=_snake_case )
return lmap(str.strip, _snake_case )
def lowercase_ ( self : List[str], _snake_case : dict ) ->Tuple:
snake_case__ : List[str] = self.tokenizer.pad_token_id
snake_case__ , snake_case__ : Any = batch['input_ids'], batch['attention_mask']
snake_case__ : Optional[int] = batch['labels']
if isinstance(self.model, _snake_case ):
snake_case__ : Union[str, Any] = self.model._shift_right(_snake_case )
else:
snake_case__ : List[Any] = shift_tokens_right(_snake_case, _snake_case )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
snake_case__ : Optional[int] = decoder_input_ids
self.save_readable_batch(_snake_case )
snake_case__ : List[str] = self(_snake_case, attention_mask=_snake_case, decoder_input_ids=_snake_case, use_cache=_snake_case )
snake_case__ : Dict = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
snake_case__ : Any = nn.CrossEntropyLoss(ignore_index=_snake_case )
assert lm_logits.shape[-1] == self.vocab_size
snake_case__ : Optional[Any] = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1] ), tgt_ids.view(-1 ) )
else:
snake_case__ : str = nn.functional.log_softmax(_snake_case, dim=-1 )
snake_case__ , snake_case__ : Union[str, Any] = label_smoothed_nll_loss(
_snake_case, _snake_case, self.hparams.label_smoothing, ignore_index=_snake_case )
return (loss,)
@property
def lowercase_ ( self : Dict ) ->int:
return self.tokenizer.pad_token_id
def lowercase_ ( self : Union[str, Any], _snake_case : List[str], _snake_case : Any ) ->Dict:
snake_case__ : Dict = self._step(_snake_case )
snake_case__ : Optional[int] = dict(zip(self.loss_names, _snake_case ) )
# tokens per batch
snake_case__ : Optional[Any] = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
snake_case__ : List[str] = batch['input_ids'].shape[0]
snake_case__ : List[str] = batch['input_ids'].eq(self.pad ).sum()
snake_case__ : Optional[Any] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowercase_ ( self : Union[str, Any], _snake_case : Union[str, Any], _snake_case : List[str] ) ->Dict:
return self._generative_step(_snake_case )
def lowercase_ ( self : int, _snake_case : Dict, _snake_case : List[Any]="val" ) ->Dict:
self.step_count += 1
snake_case__ : str = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
snake_case__ : Optional[int] = losses['loss']
snake_case__ : Optional[int] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
snake_case__ : Optional[int] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
snake_case__ : torch.FloatTensor = torch.tensor(_snake_case ).type_as(_snake_case )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_snake_case )
snake_case__ : Optional[int] = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
snake_case__ : List[str] = self.step_count
self.metrics[prefix].append(_snake_case ) # callback writes this to self.metrics_save_path
snake_case__ : List[str] = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def lowercase_ ( self : Optional[int], _snake_case : Optional[Any], _snake_case : Optional[int] ) ->Dict:
return calculate_rouge(_snake_case, _snake_case )
def lowercase_ ( self : Optional[int], _snake_case : dict ) ->dict:
snake_case__ : Tuple = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
snake_case__ : List[str] = self.model.generate(
batch['input_ids'], attention_mask=batch['attention_mask'], use_cache=_snake_case, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, )
snake_case__ : str = (time.time() - ta) / batch['input_ids'].shape[0]
snake_case__ : List[str] = self.ids_to_clean_text(_snake_case )
snake_case__ : List[str] = self.ids_to_clean_text(batch['labels'] )
snake_case__ : List[Any] = self._step(_snake_case )
snake_case__ : Any = dict(zip(self.loss_names, _snake_case ) )
snake_case__ : Dict = self.calc_generative_metrics(_snake_case, _snake_case )
snake_case__ : int = np.mean(lmap(_snake_case, _snake_case ) )
base_metrics.update(gen_time=_snake_case, gen_len=_snake_case, preds=_snake_case, target=_snake_case, **_snake_case )
return base_metrics
def lowercase_ ( self : Tuple, _snake_case : Dict, _snake_case : Union[str, Any] ) ->Tuple:
return self._generative_step(_snake_case )
def lowercase_ ( self : Dict, _snake_case : Union[str, Any] ) ->str:
return self.validation_epoch_end(_snake_case, prefix='test' )
def lowercase_ ( self : Union[str, Any], _snake_case : Any ) ->SeqaSeqDataset:
snake_case__ : Optional[int] = self.n_obs[type_path]
snake_case__ : str = self.target_lens[type_path]
snake_case__ : Optional[int] = self.dataset_class(
self.tokenizer, type_path=_snake_case, n_obs=_snake_case, max_target_length=_snake_case, **self.dataset_kwargs, )
return dataset
def lowercase_ ( self : Any, _snake_case : str, _snake_case : int, _snake_case : bool = False ) ->DataLoader:
snake_case__ : Union[str, Any] = self.get_dataset(_snake_case )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
snake_case__ : str = dataset.make_sortish_sampler(_snake_case, distributed=self.hparams.gpus > 1 )
return DataLoader(
_snake_case, batch_size=_snake_case, collate_fn=dataset.collate_fn, shuffle=_snake_case, num_workers=self.num_workers, sampler=_snake_case, )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
snake_case__ : Dict = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1 )
return DataLoader(
_snake_case, batch_sampler=_snake_case, collate_fn=dataset.collate_fn, num_workers=self.num_workers, )
else:
return DataLoader(
_snake_case, batch_size=_snake_case, collate_fn=dataset.collate_fn, shuffle=_snake_case, num_workers=self.num_workers, sampler=_snake_case, )
def lowercase_ ( self : int ) ->DataLoader:
snake_case__ : Union[str, Any] = self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=_snake_case )
return dataloader
def lowercase_ ( self : str ) ->DataLoader:
return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size )
def lowercase_ ( self : List[Any] ) ->DataLoader:
return self.get_dataloader('test', batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowercase_ ( _snake_case : Dict, _snake_case : str ) ->str:
BaseTransformer.add_model_specific_args(_snake_case, _snake_case )
add_generic_args(_snake_case, _snake_case )
parser.add_argument(
'--max_source_length', default=1_0_2_4, type=_snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--max_target_length', default=5_6, type=_snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--val_max_target_length', default=1_4_2, type=_snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument(
'--test_max_target_length', default=1_4_2, type=_snake_case, help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
), )
parser.add_argument('--freeze_encoder', action='store_true' )
parser.add_argument('--freeze_embeds', action='store_true' )
parser.add_argument('--sortish_sampler', action='store_true', default=_snake_case )
parser.add_argument('--overwrite_output_dir', action='store_true', default=_snake_case )
parser.add_argument('--max_tokens_per_batch', type=_snake_case, default=_snake_case )
parser.add_argument('--logger_name', type=_snake_case, choices=['default', 'wandb', 'wandb_shared'], default='default' )
parser.add_argument('--n_train', type=_snake_case, default=-1, required=_snake_case, help='# examples. -1 means use all.' )
parser.add_argument('--n_val', type=_snake_case, default=5_0_0, required=_snake_case, help='# examples. -1 means use all.' )
parser.add_argument('--n_test', type=_snake_case, default=-1, required=_snake_case, help='# examples. -1 means use all.' )
parser.add_argument(
'--task', type=_snake_case, default='summarization', required=_snake_case, help='# examples. -1 means use all.' )
parser.add_argument('--label_smoothing', type=_snake_case, default=0.0, required=_snake_case )
parser.add_argument('--src_lang', type=_snake_case, default='', required=_snake_case )
parser.add_argument('--tgt_lang', type=_snake_case, default='', required=_snake_case )
parser.add_argument('--eval_beams', type=_snake_case, default=_snake_case, required=_snake_case )
parser.add_argument(
'--val_metric', type=_snake_case, default=_snake_case, required=_snake_case, choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length', type=_snake_case, default=_snake_case, help='never generate more than n tokens' )
parser.add_argument('--save_top_k', type=_snake_case, default=1, required=_snake_case, help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience', type=_snake_case, default=-1, required=_snake_case, help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
), )
return parser
class TranslationModule(SummarizationModule):
    """Translation variant of SummarizationModule: BLEU instead of ROUGE, plus language pair.

    NOTE(review): class name restored from the call in `main` (`TranslationModule(args)`);
    attribute names match the reads elsewhere in the class (`self.loss_names`,
    `self.metric_names`, `self.default_val_metric`).
    """

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        # Forward the language pair to the dataset constructor kwargs.
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    """Train (and optionally test) a summarization/translation module from CLI args.

    NOTE(review): the obfuscated original declared both parameters as `A` (a SyntaxError)
    and dropped the checkpoint attribute assignments into dead locals; restored from the
    upstream seq2seq-distillation `finetune.py`.
    """
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    # Resume testing from the most recent checkpoint, if any exist.
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
a_ :Any = argparse.ArgumentParser()
a_ :Dict = pl.Trainer.add_argparse_args(parser)
a_ :Optional[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
a_ :Dict = parser.parse_args()
main(args)
| 243 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Standard transformers lazy-module __init__ for BARTpho.
# NOTE(review): the obfuscated original assigned everything to `UpperCAmelCase_`, leaving
# `_import_structure` undefined and never installing the lazy module into `sys.modules`.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Tokenizer only exists when sentencepiece is installed.
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 17 | '''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Project Euler 85: area of the grid whose rectangle count is nearest `target`.

    An a x b grid contains T(a) * T(b) rectangles, where T(n) is the n-th triangle
    number.  For each candidate a we solve T(b) ~ target / T(a) with the quadratic
    formula and test the two nearest integer b values.

    NOTE(review): the obfuscated original assigned every local to `__UpperCAmelCase`
    while the use sites kept the real names (NameError at runtime); the function is
    restored around those use sites, and renamed to match the `solution()` call in
    the __main__ guard.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        # an estimate of b, using the quadratic formula
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
| 396 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
# Module-level logger (obfuscated name; conventionally `logger`). Unused in the visible chunk.
a_ = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated alias of `GLPNImageProcessor`; kept for backward compatibility.

    NOTE(review): the obfuscated original subclassed the undefined name `_UpperCamelCase`
    and passed the `*args` tuple as the warning category; names restored from the
    deprecation message and the `image_processing_glpn` import above.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the combined elements of two (possibly empty) lists.

    NOTE(review): the obfuscated original declared both parameters as `__lowercase`
    (a SyntaxError) and took `divmod(len(<one list>), 2)`; restored so the median is
    taken over the merged, sorted sequence, and renamed to match the call in the
    __main__ guard.
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    """Builds tiny random GPT-NeoX configs/inputs and shape-checks each model head.

    NOTE(review): the obfuscated original named every method `_UpperCAmelCase` (later
    defs shadowed earlier ones) and declared duplicate `snake_case_` parameters
    (SyntaxErrors), while the test class below calls `prepare_config_and_inputs`,
    `create_and_check_model`, etc.  Names are restored to match those call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Reserve the last vocab id as the pad token for these tests.
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Random input ids / attention mask / token labels plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        """Check that cached decoding matches a full forward pass on extended inputs."""
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model-test suite for GPT-NeoX.

    NOTE(review): the obfuscated original subclassed the undefined name `lowercase__`
    and named every test method `_UpperCAmelCase` (so only the last survived class
    creation); bases, flag attributes and `test_*` names restored from the upstream
    GPT-NeoX test module.
    """

    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original had four anonymous `False` flags; restored per upstream.
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        """Scaled RoPE must match the unscaled model on short inputs (dynamic only) and
        differ on inputs longer than the original max position embeddings."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation with a real Pythia checkpoint."""

    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        # Exercise both the checkpointed and non-checkpointed forward paths.
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact GELU via the Gaussian CDF (erf form).

    Restored name: the `gelu_10` helper and the TF<2.4 fallback below reference `_gelu`.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    """Tanh approximation of GELU (the "new" GPT-2 variant)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    """Fast tanh-based GELU approximation.

    NOTE(review): the obfuscated original cast two coefficients into the same name and
    then referenced the undefined `coeffa`; restored with the standard constants
    (0.044715 inner, 0.7978845608 = sqrt(2/pi) outer).
    """
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_10(x):
    """Exact GELU with outputs clipped to [-10, 10] (quantization-friendly)."""
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two along `axis` and gate the first half.

    NOTE(review): the obfuscated original declared both parameters as `__magic_name__`
    (a SyntaxError) and returned the undefined name `a`.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        # Keras >= 2.4 ships a fused gelu kernel; approximate=True matches _gelu_new.
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    # Older TF: fall back to the hand-written implementations above.
    gelu = _gelu
    gelu_new = _gelu_new

# String name -> activation callable, consumed by get_tf_activation().
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    """Return the TF activation function registered under `activation_string`.

    Raises:
        KeyError: if the name is not present in the ACTaFN mapping.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}")
| 679 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
# Module-level logger; the class methods below log through this name.
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    """
    PyTorch-Lightning module that fine-tunes a transformer for token
    classification (NER). See ``BaseTransformer`` for the core options.
    """

    # Read by BaseTransformer to select the AutoModel head.
    mode = "token-classification"

    def __init__(self, hparams):
        """Resolve the task class from ``tasks``, load the label list and
        initialize the base transformer."""
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        # Label id used to exclude padding / sub-word positions from the loss.
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        """Run one training batch and return the loss for Lightning."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        """Build (or load cached) features for the train/dev/test splits."""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    # NOTE(review): original call site lost this value in the refactor;
                    # False matches the non-RoBERTa default — confirm for RoBERTa runs.
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False):
        """Load cached features for ``mode`` and wrap them in a DataLoader."""
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation loss and collect predictions/targets."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        """Aggregate step outputs into seqeval metrics; shared by val and test."""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                # Skip positions masked out of the loss (padding / sub-words).
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": fa_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Add NER-specific CLI arguments on top of the base transformer args."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
if __name__ == "__main__":
    # Build the CLI: generic training args plus the NER-specific ones.
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        # Evaluate the last checkpoint produced during training.
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps sub-module name -> public symbols it provides.
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional; without it the modeling symbols are simply not exposed.
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
# Module-level logger used by the weight-loading helpers below.
logger = logging.get_logger("transformers.models.speecht5")
# fairseq key prefix -> HF key prefix, grouped per SpeechT5 sub-module.
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task mappings: shared encoder/decoder plus the task-specific pre/post-nets.
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
# fairseq keys to silently drop; "*" acts as a wildcard segment.
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy a fairseq tensor into the HF model.

    Args:
        hf_pointer: root HF module; walked attribute-by-attribute along `key`.
        key: dotted attribute path inside the HF model.
        value: tensor from the fairseq state dict.
        full_name: original fairseq key (used only for error/log messages).
        weight_type: which parameter/buffer of the target module to set
            ("weight", "weight_g", "weight_v", "bias", "running_mean",
            "running_var", "num_batches_tracked") or None to set `.data`
            on the pointer itself.

    Raises:
        ValueError: if the target shape does not match `value`.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    """Return True if fairseq key `name` matches any pattern in `ignore_keys`.

    Patterns support a trailing ``.*`` (prefix match) and an inner ``.*.``
    wildcard (both halves must appear in the name); anything else matches as
    a plain substring.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Load a fairseq state dict into the HF SpeechT5 model for `task`
    ("s2t", "t2s" or "s2s"), logging ignored and unused keys."""
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        # Text-to-speech has no speech feature encoder on the input side.
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
            if not is_used:
                unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq feature-encoder conv weight into the HF feature extractor.

    `full_name` is the fairseq key ("...conv_layers.<layer>.<type>...."); type 0
    is the conv itself, type 2 the layer norm (only layer 0 when group norm is
    used). Keys that match neither are appended to `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """Convert a fairseq SpeechT5 checkpoint for `task` ("s2t"/"t2s"/"s2s")
    to the HF format, save it to `pytorch_dump_folder_path`, and optionally
    push model + processor to `repo_id` on the hub."""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    # NOTE(review): when vocab_path is not given, `tokenizer` is undefined below —
    # the script currently requires a vocab for processor creation; confirm intent.
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
) | 555 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
# Generic element type used by the aliases below.
T = TypeVar("T")

# A list or tuple of homogeneous elements.
ListLike = Union[List[T], Tuple[T, ...]]
# A bare value, or a list/dict nesting of values.
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
# Anything accepted as a filesystem path.
PathLike = Union[str, bytes, os.PathLike]
| 95 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _SCREAMING_SNAKE_CASE(DiffusionPipeline):
    """
    Pipeline for audio diffusion: denoises mel-spectrogram images (optionally
    through a VQ-VAE latent space) and converts them back to audio via `Mel`.
    """

    # The VQ-VAE is optional; without it diffusion runs in pixel space.
    _optional_components = ["vqvae"]

    def __init__(self, vqvae, unet, mel, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self):
        """Default inference step count: 50 for DDIM, 1000 otherwise."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        audio_file=None,
        raw_audio=None,
        slice=0,
        start_step=0,
        steps=None,
        generator=None,
        mask_start_secs=0,
        mask_end_secs=0,
        step_generator=None,
        eta=0,
        noise=None,
        encoding=None,
        return_dict=True,
    ):
        """Generate audio by denoising, optionally conditioned on an input
        audio slice (for variation / in-painting via the mask arguments)."""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            # Scale pixel values from [0, 255] to [-1, 1].
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                # Re-impose the known region of the input on each step.
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(img[:, :, 0]) for img in images)
            if images.shape[3] == 1
            else (Image.fromarray(img, mode="RGB").convert("L") for img in images)
        )

        audios = [self.mel.image_to_audio(img) for img in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images, steps=50):
        """Reverse the DDIM sampling process: map spectrogram images back to
        the latent noise that would generate them (DDIM inversion)."""
        # Only works with DDIM (a deterministic scheduler).
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            # Run the DDIM update in reverse to step toward higher noise.
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(xa, xa_, alpha):
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xa_)) / torch.norm(xa) / torch.norm(xa_))
        return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xa_ / sin(theta)
| 142 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16).

    >>> solution(15)
    26
    """
    n = 2**power
    r = 0
    # Peel off decimal digits one at a time.
    while n:
        r, n = r + n % 10, n // 10
    return r


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 142 | 1 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def A_ ( _lowerCAmelCase ) -> None:
    """Seed every RNG used here (``random``, NumPy, torch CPU and all CUDA
    devices) so results are reproducible.

    Fixes: the body previously referenced ``__lowerCAmelCase`` (two leading
    underscores) while the parameter is ``_lowerCAmelCase`` -- a NameError on
    every call. The bogus ``-> Union[str, Any]`` annotation is corrected to
    ``None`` (nothing is returned).
    """
    random.seed(_lowerCAmelCase )
    np.random.seed(_lowerCAmelCase )
    torch.manual_seed(_lowerCAmelCase )
    torch.cuda.manual_seed_all(_lowerCAmelCase )
    # ^^ safe to call this function even if cuda is not available
class A__ :
    """Exponential moving average (EMA) of model parameters.

    NOTE(review): this class appears machine-mangled. Every ``__init__``
    parameter is named ``A_`` (a duplicate-argument SyntaxError), every method
    parameter is likewise ``A_`` with ``snake_case_`` read in the bodies, and
    assignments go to annotated throwaway locals named ``UpperCamelCase``
    while the code later reads the attributes (``self.decay``,
    ``self.shadow_params`` ...) those assignments were meant to set. The
    docstrings below describe the *intended* behaviour inferred from the
    surviving identifiers; none of it runs as written.
    """
    def __init__( self , A_ , A_ = 0.99_99 , A_ = 0.0 , A_ = 0 , A_ = False , A_ = 1.0 , A_ = 2 / 3 , A_ = None , A_ = None , **A_ , ):
        '''Intended: record the EMA hyper-parameters (decay, min_decay, warmup settings) and clone the initial shadow parameters (mangled -- see class note).'''
        if isinstance(snake_case_ , torch.nn.Module ):
            UpperCamelCase : int = (
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , snake_case_ , standard_warn=snake_case_ , )
            UpperCamelCase : Any = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            UpperCamelCase : Tuple = True
        if kwargs.get("max_value" , snake_case_ ) is not None:
            UpperCamelCase : Dict = '''The `max_value` argument is deprecated. Please use `decay` instead.'''
            deprecate("max_value" , "1.0.0" , snake_case_ , standard_warn=snake_case_ )
            UpperCamelCase : Union[str, Any] = kwargs['''max_value''']
        if kwargs.get("min_value" , snake_case_ ) is not None:
            UpperCamelCase : Dict = '''The `min_value` argument is deprecated. Please use `min_decay` instead.'''
            deprecate("min_value" , "1.0.0" , snake_case_ , standard_warn=snake_case_ )
            UpperCamelCase : Union[str, Any] = kwargs['''min_value''']
        UpperCamelCase : List[Any] = list(snake_case_ )
        # Intended: the EMA "shadow" copy of every parameter.
        UpperCamelCase : List[Any] = [p.clone().detach() for p in parameters]
        if kwargs.get("device" , snake_case_ ) is not None:
            UpperCamelCase : str = '''The `device` argument is deprecated. Please use `to` instead.'''
            deprecate("device" , "1.0.0" , snake_case_ , standard_warn=snake_case_ )
            self.to(device=kwargs["device"] )
        UpperCamelCase : str = None
        UpperCamelCase : List[Any] = decay
        UpperCamelCase : Dict = min_decay
        UpperCamelCase : Dict = update_after_step
        UpperCamelCase : Optional[Any] = use_ema_warmup
        UpperCamelCase : Dict = inv_gamma
        UpperCamelCase : Dict = power
        UpperCamelCase : Optional[int] = 0
        UpperCamelCase : Optional[int] = None # set in `step()`
        UpperCamelCase : Optional[int] = model_cls
        UpperCamelCase : Union[str, Any] = model_config
    @classmethod
    def __UpperCamelCase( cls , A_ , A_ ):
        '''Intended classmethod: rebuild a model from a checkpoint path and load its saved EMA state into a new instance.'''
        UpperCamelCase : str = model_cls.load_config(snake_case_ , return_unused_kwargs=snake_case_ )
        UpperCamelCase : Any = model_cls.from_pretrained(snake_case_ )
        UpperCamelCase : int = cls(model.parameters() , model_cls=snake_case_ , model_config=model.config )
        ema_model.load_state_dict(snake_case_ )
        return ema_model
    def __UpperCamelCase( self , A_ ):
        '''Intended: bake the shadow parameters into a freshly-built model and save it to the given path.'''
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
        UpperCamelCase : Tuple = self.model_cls.from_config(self.model_config )
        UpperCamelCase : int = self.state_dict()
        state_dict.pop("shadow_params" , snake_case_ )
        model.register_to_config(**snake_case_ )
        self.copy_to(model.parameters() )
        model.save_pretrained(snake_case_ )
    def __UpperCamelCase( self , A_ ):
        '''Intended: return the decay factor for a given optimization step (warmup-aware, clamped to [min_decay, decay]).'''
        UpperCamelCase : Union[str, Any] = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            UpperCamelCase : Optional[Any] = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            UpperCamelCase : str = (1 + step) / (10 + step)
        UpperCamelCase : Tuple = min(snake_case_ , self.decay )
        # make sure decay is not smaller than min_decay
        UpperCamelCase : Union[str, Any] = max(snake_case_ , self.min_decay )
        return cur_decay_value
    @torch.no_grad()
    def __UpperCamelCase( self , A_ ):
        '''Intended: fold the current parameters into the shadow parameters with the step-dependent decay (DeepSpeed ZeRO aware).'''
        if isinstance(snake_case_ , torch.nn.Module ):
            UpperCamelCase : Tuple = (
                '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '''
                '''Please pass the parameters of the module instead.'''
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , snake_case_ , standard_warn=snake_case_ , )
            UpperCamelCase : Tuple = parameters.parameters()
        UpperCamelCase : str = list(snake_case_ )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        UpperCamelCase : Optional[Any] = self.get_decay(self.optimization_step )
        UpperCamelCase : List[Any] = decay
        UpperCamelCase : int = 1 - decay
        UpperCamelCase : List[str] = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params , snake_case_ ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
                UpperCamelCase : str = deepspeed.zero.GatheredParameters(snake_case_ , modifier_rank=snake_case_ )
            with context_manager():
                if param.requires_grad:
                    # Intended in-place EMA update: s <- s - (1 - decay) * (s - p)
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(snake_case_ )
    def __UpperCamelCase( self , A_ ):
        '''Intended: copy the shadow parameters into the given parameters in place.'''
        UpperCamelCase : Tuple = list(snake_case_ )
        for s_param, param in zip(self.shadow_params , snake_case_ ):
            param.data.copy_(s_param.to(param.device ).data )
    def __UpperCamelCase( self , A_=None , A_=None ):
        '''Intended: move/cast the shadow parameters to a device/dtype (dtype only for floating-point tensors).'''
        UpperCamelCase : Tuple = [
            p.to(device=snake_case_ , dtype=snake_case_ ) if p.is_floating_point() else p.to(device=snake_case_ )
            for p in self.shadow_params
        ]
    def __UpperCamelCase( self ):
        '''Return the EMA state as a plain dict (this method only reads ``self`` attributes and is coherent as written).'''
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }
    def __UpperCamelCase( self , A_ ):
        '''Intended: stash a detached CPU copy of the given parameters for a later restore.'''
        UpperCamelCase : Dict = [param.detach().cpu().clone() for param in parameters]
    def __UpperCamelCase( self , A_ ):
        '''Intended: copy the previously stored parameters back into the given parameters, then drop the stash.'''
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
        for c_param, param in zip(self.temp_stored_params , snake_case_ ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        UpperCamelCase : List[str] = None
    def __UpperCamelCase( self , A_ ):
        '''Intended: validate and adopt a state dict produced by ``state_dict`` (assignments mangled -- see class note; the checks read attributes the assignments never set).'''
        UpperCamelCase : int = copy.deepcopy(snake_case_ )
        UpperCamelCase : Tuple = state_dict.get("decay" , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1" )
        UpperCamelCase : Tuple = state_dict.get("min_decay" , self.min_decay )
        if not isinstance(self.min_decay , snake_case_ ):
            raise ValueError("Invalid min_decay" )
        UpperCamelCase : Tuple = state_dict.get("optimization_step" , self.optimization_step )
        if not isinstance(self.optimization_step , snake_case_ ):
            raise ValueError("Invalid optimization_step" )
        UpperCamelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
        if not isinstance(self.update_after_step , snake_case_ ):
            raise ValueError("Invalid update_after_step" )
        UpperCamelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , snake_case_ ):
            raise ValueError("Invalid use_ema_warmup" )
        UpperCamelCase : Tuple = state_dict.get("inv_gamma" , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError("Invalid inv_gamma" )
        UpperCamelCase : List[str] = state_dict.get("power" , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError("Invalid power" )
        UpperCamelCase : Dict = state_dict.get("shadow_params" , snake_case_ )
        if shadow_params is not None:
            UpperCamelCase : Any = shadow_params
            if not isinstance(self.shadow_params , snake_case_ ):
                raise ValueError("shadow_params must be a list" )
            if not all(isinstance(snake_case_ , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError("shadow_params must all be Tensors" )
| 629 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def _a ( __lowerCAmelCase : Dict ):
"""simple docstring"""
snake_case__ : Any = np.max(__lowerCAmelCase , axis=-1 , keepdims=__lowerCAmelCase )
snake_case__ : List[Any] = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowerCAmelCase )
class a ( SCREAMING_SNAKE_CASE ):
    """A custom text-(pair-)classification pipeline.

    NOTE(review): all four methods are named ``__magic_name__`` so only the
    last definition survives on the class; from their bodies they were
    presumably ``_sanitize_parameters`` / ``preprocess`` / ``_forward`` /
    ``postprocess``. The second one also declares two parameters with the
    same name (a SyntaxError), and several bodies read names that are never
    bound (``kwargs``, ``preprocess_kwargs``, ``model_outputs``, ``softmax``,
    ``best_class``, ``probabilities``, ``logits``, ``label``, ``score``)
    because assignments target throwaway ``snake_case__`` locals. Needs
    de-mangling before it can run.
    """
    def __magic_name__ ( self : List[Any] , **snake_case_ : Tuple ):
        '''Intended: split pipeline kwargs into (preprocess, forward, postprocess) kwarg dicts, forwarding ``second_text``.'''
        snake_case__ : List[Any] = {}
        if "second_text" in kwargs:
            snake_case__ : Any = kwargs['''second_text''']
        return preprocess_kwargs, {}, {}
    def __magic_name__ ( self : Optional[int] , snake_case_ : Dict , snake_case_ : Optional[int]=None ):
        '''Intended: tokenize the text (optionally as a pair) into framework tensors.'''
        return self.tokenizer(snake_case_ , text_pair=snake_case_ , return_tensors=self.framework )
    def __magic_name__ ( self : List[str] , snake_case_ : List[Any] ):
        '''Intended: run the model on the tokenized inputs.'''
        return self.model(**snake_case_ )
    def __magic_name__ ( self : Any , snake_case_ : List[Any] ):
        '''Intended: softmax the logits and return the best label, its score, and the raw logits.'''
        snake_case__ : int = model_outputs.logits[0].numpy()
        snake_case__ : Any = softmax(snake_case_ )
        snake_case__ : int = np.argmax(snake_case_ )
        snake_case__ : Union[str, Any] = self.model.config.idalabel[best_class]
        snake_case__ : List[Any] = probabilities[best_class].item()
        snake_case__ : str = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 347 | 0 |
"""Built-in potential of a p-n junction.

V_bi = (kT/q) * ln(N_d * N_a / n_i**2), evaluated at a fixed temperature.
"""
from math import log

from scipy.constants import Boltzmann, physical_constants

a__ = 3_0_0  # TEMPERATURE (unit = K)


def UpperCAmelCase__ (donor_conc , acceptor_conc , intrinsic_conc , ):
    """Return the built-in voltage (in volts) of a p-n junction.

    Args:
        donor_conc: donor concentration N_d (must be > 0).
        acceptor_conc: acceptor concentration N_a (must be > 0).
        intrinsic_conc: intrinsic carrier concentration n_i (must be > 0 and
            smaller than both N_d and N_a).

    Raises:
        ValueError: if any concentration is non-positive, or the intrinsic
            concentration is not smaller than the donor/acceptor ones.

    Fixes: the original declared all three parameters as ``lowerCAmelCase_``
    (a duplicate-argument SyntaxError), read an undefined temperature ``T``
    (the module constant is ``a__``), and annotated the constant with an
    un-imported ``Union`` (a NameError at import time).
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive" )
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive" )
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive" )
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration" )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration" )
    else:
        return (
            Boltzmann
            * a__
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 553 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# Fix: the original annotated this as ``: Dict`` -- ``Dict`` is not imported in
# this module, so the (eagerly evaluated) module-level annotation raised a
# NameError at import time. Besides, the literal is a set, not a dict.
a__ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class UpperCamelCase_ ( unittest.TestCase):
    """Pipeline tests for text classification.

    NOTE(review): this class is machine-mangled. The class body reads
    ``model_mapping`` / ``tf_model_mapping`` and ``_TO_SKIP``, none of which
    exist (the assignments target ``snake_case__`` and the module constant is
    named ``a__``), so class creation raises NameError; the ``Tuple`` /
    ``List[Any]`` / ``Dict`` annotations are also un-imported. Method bodies
    assign to throwaway ``__SCREAMING_SNAKE_CASE`` names while asserting on
    the intended names (``text_classifier``, ``outputs``, ``model``, ``N``),
    and the last two methods were presumably ``get_test_pipeline`` /
    ``run_pipeline_test``. The assertions document the intended pipeline
    contract but cannot run as written.
    """
    snake_case__ : str = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    snake_case__ : Tuple = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        snake_case__ : List[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        snake_case__ : Dict = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def UpperCAmelCase_ ( self : str ) -> Tuple:
        # Intended: small PyTorch checkpoint; exercises top_k and the legacy
        # return_all_scores output shapes.
        __SCREAMING_SNAKE_CASE = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
        __SCREAMING_SNAKE_CASE = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
        __SCREAMING_SNAKE_CASE = text_classifier("This is great !" , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
        __SCREAMING_SNAKE_CASE = text_classifier(["This is great !", "This is bad"] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase__ ) , [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ] , )
        __SCREAMING_SNAKE_CASE = text_classifier("This is great !" , top_k=1 )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
        # Legacy behavior
        __SCREAMING_SNAKE_CASE = text_classifier("This is great !" , return_all_scores=UpperCAmelCase__ )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
        __SCREAMING_SNAKE_CASE = text_classifier("This is great !" , return_all_scores=UpperCAmelCase__ )
        self.assertEqual(
            nested_simplify(UpperCAmelCase__ ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
        __SCREAMING_SNAKE_CASE = text_classifier(["This is great !", "Something else"] , return_all_scores=UpperCAmelCase__ )
        self.assertEqual(
            nested_simplify(UpperCAmelCase__ ) , [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ] , )
        __SCREAMING_SNAKE_CASE = text_classifier(["This is great !", "Something else"] , return_all_scores=UpperCAmelCase__ )
        self.assertEqual(
            nested_simplify(UpperCAmelCase__ ) , [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ] , )
    @require_torch
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
        import torch
        __SCREAMING_SNAKE_CASE = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
        __SCREAMING_SNAKE_CASE = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
    @require_tf
    def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
        __SCREAMING_SNAKE_CASE = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
        __SCREAMING_SNAKE_CASE = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": "LABEL_0", "score": 0.504}] )
    @slow
    @require_torch
    def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
        __SCREAMING_SNAKE_CASE = pipeline("text-classification" )
        __SCREAMING_SNAKE_CASE = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": "POSITIVE", "score": 1.0}] )
        __SCREAMING_SNAKE_CASE = text_classifier("This is bad !" )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": "NEGATIVE", "score": 1.0}] )
        __SCREAMING_SNAKE_CASE = text_classifier("Birds are a type of animal" )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": "POSITIVE", "score": 0.988}] )
    @slow
    @require_tf
    def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
        __SCREAMING_SNAKE_CASE = pipeline("text-classification" , framework="tf" )
        __SCREAMING_SNAKE_CASE = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": "POSITIVE", "score": 1.0}] )
        __SCREAMING_SNAKE_CASE = text_classifier("This is bad !" )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": "NEGATIVE", "score": 1.0}] )
        __SCREAMING_SNAKE_CASE = text_classifier("Birds are a type of animal" )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": "POSITIVE", "score": 0.988}] )
    def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple ) -> Dict:
        # Intended: build a pipeline around the provided (model, tokenizer) pair.
        __SCREAMING_SNAKE_CASE = TextClassificationPipeline(model=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ) -> Dict:
        __SCREAMING_SNAKE_CASE = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        __SCREAMING_SNAKE_CASE = "HuggingFace is in"
        __SCREAMING_SNAKE_CASE = text_classifier(UpperCAmelCase__ )
        self.assertEqual(nested_simplify(UpperCAmelCase__ ) , [{"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}] )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
        __SCREAMING_SNAKE_CASE = ["HuggingFace is in ", "Paris is in France"]
        __SCREAMING_SNAKE_CASE = text_classifier(UpperCAmelCase__ )
        self.assertEqual(
            nested_simplify(UpperCAmelCase__ ) , [{"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}, {"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}] , )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
        self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        __SCREAMING_SNAKE_CASE = text_classifier(UpperCAmelCase__ , top_k=UpperCAmelCase__ )
        __SCREAMING_SNAKE_CASE = len(model.config.idalabel.values() )
        self.assertEqual(
            nested_simplify(UpperCAmelCase__ ) , [[{"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}] * N, [{"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}] * N] , )
        __SCREAMING_SNAKE_CASE = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        __SCREAMING_SNAKE_CASE = text_classifier(UpperCAmelCase__ )
        self.assertEqual(
            nested_simplify(UpperCAmelCase__ ) , {"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )} , )
        self.assertTrue(outputs["label"] in model.config.idalabel.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        __SCREAMING_SNAKE_CASE = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(UpperCAmelCase__ ):
            text_classifier(UpperCAmelCase__ )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        __SCREAMING_SNAKE_CASE = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
        self.assertEqual(
            nested_simplify(UpperCAmelCase__ ) , [{"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}] , )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 553 | 1 |
"""Shared typing aliases."""
import os
from typing import Dict, List, Tuple, TypeVar, Union

# Fix: the alias expressions below reference ``T``, but every assignment in
# this module previously rebound the single name ``_SCREAMING_SNAKE_CASE``,
# leaving ``T`` undefined and crashing with NameError at import time.
# Binding the type variable to ``T`` (and keeping the original rebinding
# chain, whose final value -- the path-like union -- is unchanged) preserves
# the module's observable interface while making it importable.
T = TypeVar("T")
_SCREAMING_SNAKE_CASE = T
_SCREAMING_SNAKE_CASE = Union[List[T], Tuple[T, ...]]  # list-or-tuple of T
_SCREAMING_SNAKE_CASE = Union[T, List[T], Dict[str, T]]  # nested data structure of T
_SCREAMING_SNAKE_CASE = Union[str, bytes, os.PathLike]  # path-like (final binding)
| 181 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowercase ( __lowercase , unittest.TestCase ):
    """Fast tests for the ShapE text-to-3D pipeline.

    NOTE(review): machine-mangled. The base class ``__lowercase`` is
    undefined at module scope (presumably ``PipelineTesterMixin``), so class
    creation raises NameError. All five class attributes rebind the single
    name ``_SCREAMING_SNAKE_CASE`` (they were presumably ``pipeline_class``,
    ``params``, ``batch_params``, ``required_optional_params``,
    ``test_gradient_checkpointing``), every method is named ``a`` (so only
    the last definition of each overload "slot" survives), and bodies assign
    to the mangled local ``__snake_case`` while returning/reading the
    intended names (``tokenizer``, ``model``, ``components``, ``inputs``,
    ``pipe``, ``image_slice`` ...). The intended structure -- tiny CLIP text
    encoder + PriorTransformer + ShapERenderer + Heun scheduler -- is still
    legible from the literals.
    """
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ["prompt"]
    _SCREAMING_SNAKE_CASE : Any = ["prompt"]
    _SCREAMING_SNAKE_CASE : str = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    _SCREAMING_SNAKE_CASE : Optional[int] = False
    @property
    def a ( self : Any ) -> Optional[int]:
        # Intended: text embedder hidden size.
        return 32
    @property
    def a ( self : List[Any] ) -> List[Any]:
        # Intended: time input dim.
        return 32
    @property
    def a ( self : Tuple ) -> List[str]:
        return self.time_input_dim * 4
    @property
    def a ( self : Dict ) -> Union[str, Any]:
        # Intended: renderer dim.
        return 8
    @property
    def a ( self : List[Any] ) -> Optional[Any]:
        __snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer
    @property
    def a ( self : Dict ) -> Any:
        torch.manual_seed(0 )
        __snake_case = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ )
    @property
    def a ( self : str ) -> Dict:
        torch.manual_seed(0 )
        __snake_case = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        __snake_case = PriorTransformer(**SCREAMING_SNAKE_CASE_ )
        return model
    @property
    def a ( self : Optional[Any] ) -> Dict:
        torch.manual_seed(0 )
        __snake_case = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        __snake_case = ShapERenderer(**SCREAMING_SNAKE_CASE_ )
        return model
    def a ( self : Tuple ) -> Dict:
        # Intended: assemble the pipeline components dict.
        __snake_case = self.dummy_prior
        __snake_case = self.dummy_text_encoder
        __snake_case = self.dummy_tokenizer
        __snake_case = self.dummy_renderer
        __snake_case = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , )
        __snake_case = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def a ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int]=0 ) -> Union[str, Any]:
        # Intended: seeded inputs dict for a 1-step, 32-px run.
        if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
            __snake_case = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
        else:
            __snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
        __snake_case = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def a ( self : Optional[Any] ) -> str:
        __snake_case = 'cpu'
        __snake_case = self.get_dummy_components()
        __snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
        __snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        __snake_case = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
        __snake_case = output.images[0]
        __snake_case = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        __snake_case = np.array(
            [
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
                0.0_0_0_3_9_2_1_6,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def a ( self : int ) -> List[str]:
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def a ( self : Dict ) -> Any:
        __snake_case = torch_device == 'cpu'
        __snake_case = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , )
    def a ( self : Union[str, Any] ) -> str:
        __snake_case = self.get_dummy_components()
        __snake_case = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
        __snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        __snake_case = 1
        __snake_case = 2
        __snake_case = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        for key in inputs.keys():
            if key in self.batch_params:
                __snake_case = batch_size * [inputs[key]]
        __snake_case = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    """Slow GPU integration test for the ShapE pipeline.

    NOTE(review): this class silently shadows the earlier ``_lowercase``
    defined in the same module. Both methods are named ``a`` -- the first
    (from its body: ``super().tearDown()``, ``gc.collect()``,
    ``torch.cuda.empty_cache()``) was presumably ``tearDown`` and is
    overwritten by the second, so unittest never runs it. The second method's
    body reads ``pipe`` and the two arguments of
    ``assert_mean_pixel_difference`` without binding them (assignments go to
    the mangled local ``__snake_case``).
    """
    def a ( self : Optional[int] ) -> Optional[Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def a ( self : Union[str, Any] ) -> Optional[Any]:
        __snake_case = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy' )
        __snake_case = ShapEPipeline.from_pretrained('openai/shap-e' )
        __snake_case = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        __snake_case = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
        __snake_case = pipe(
            'a shark' , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 56 | 0 |
'''Lazy-import module setup for the GroupViT model family.

NOTE(review): machine-mangled. The ``: Any`` / ``: Union[str, Any]`` /
``: Tuple`` / ``: Optional[int]`` annotations on the module-level
assignments reference typing names that are never imported (only
``TYPE_CHECKING`` is), so the first one raises NameError at import time.
Every assignment also rebinds the single name ``lowercase`` instead of
populating ``_import_structure["modeling_groupvit"]`` etc., and the
``_LazyModule`` call at the bottom reads an undefined ``_import_structure``.
The structure is the standard transformers lazy-init pattern: a dict of
submodule -> exported names, optionally extended when torch / TF are
available, mirrored by real imports under ``TYPE_CHECKING``.
'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase : Any = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Intended: register the PyTorch exports when torch is installed.
    lowercase : Union[str, Any] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Intended: register the TensorFlow exports when TF is installed.
    lowercase : Tuple = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys
    # Replace this module with a lazily-loading proxy at import time.
    lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
'''METEOR metric wrapper around ``nltk.translate.meteor_score``.

NOTE(review): machine-mangled. The three module constants (citation,
description, kwargs description) all rebind the single name ``lowercase``,
so the decorator below reads undefined ``_DESCRIPTION`` / ``_CITATION`` /
``_KWARGS_DESCRIPTION``; likewise the parsed nltk version is assigned to
``lowercase`` but read as ``NLTK_VERSION``. All three methods share the
name ``lowerCamelCase`` (presumably ``_info`` / ``_download_and_prepare`` /
``_compute``), and ``_compute`` declares five parameters all named
``lowerCAmelCase_`` -- a duplicate-argument SyntaxError. Needs de-mangling.
'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowercase : str = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize
lowercase : int = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowercase : Any = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowercase : Optional[Any] = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
    def lowerCamelCase ( self ):
        '''Intended ``_info``: declare string prediction/reference features and codebase/reference URLs.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Value('string' , id='sequence' ),
                } ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
                'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
                'https://en.wikipedia.org/wiki/METEOR',
            ] , )
    def lowerCamelCase ( self , lowerCAmelCase_ ):
        '''Intended ``_download_and_prepare``: fetch the nltk corpora needed by the version in use.'''
        import nltk
        nltk.download('wordnet' )
        if NLTK_VERSION >= version.Version('3.6.5' ):
            nltk.download('punkt' )
        if NLTK_VERSION >= version.Version('3.6.6' ):
            nltk.download('omw-1.4' )
    def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=0.9 , lowerCAmelCase_=3 , lowerCAmelCase_=0.5 ):
        '''Intended ``_compute``: mean single-sentence METEOR over (reference, prediction) pairs; newer nltk versions require pre-tokenized input.'''
        if NLTK_VERSION >= version.Version('3.6.5' ):
            _snake_case = [
                meteor_score.single_meteor_score(
                    word_tokenize(lowerCAmelCase_ ) , word_tokenize(lowerCAmelCase_ ) , alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , gamma=lowerCAmelCase_ )
                for ref, pred in zip(lowerCAmelCase_ , lowerCAmelCase_ )
            ]
        else:
            _snake_case = [
                meteor_score.single_meteor_score(lowerCAmelCase_ , lowerCAmelCase_ , alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , gamma=lowerCAmelCase_ )
                for ref, pred in zip(lowerCAmelCase_ , lowerCAmelCase_ )
            ]
        return {"meteor": np.mean(lowerCAmelCase_ )}
| 542 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
lowercase__ : int = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
lowercase__ : List[str] = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
lowercase__ : Any = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _UpperCAmelCase ( datasets.Metric):
    """SQuAD v1.1 metric: exact match (EM) and F1 via the official scorer.

    Fixes over the previous revision: both methods were named ``_snake_case``
    (the second definition shadowed the first, and ``datasets.Metric``
    dispatches to ``_info`` / ``_compute``), and the compute method declared
    two parameters with the same name while its body read undefined names.
    """

    def _info(self):
        """Declare the metric's input features and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
                    '''references''': {
                        '''id''': datasets.Value('''string''' ),
                        '''answers''': datasets.features.Sequence(
                            {
                                '''text''': datasets.Value('''string''' ),
                                '''answer_start''': datasets.Value('''int32''' ),
                            } ),
                    },
                } ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )

    def _compute(self, predictions, references):
        """Score ``predictions`` against ``references`` with the official script.

        Returns:
            dict with ``exact_match`` and ``f1`` (both floats in [0, 100]).
        """
        # id -> predicted answer text, the mapping the official scorer expects.
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        # Re-wrap the flat references into the nested SQuAD dataset layout
        # (article -> paragraphs -> qas) consumed by `evaluate`.
        dataset = [
            {
                '''paragraphs''': [
                    {
                        '''qas''': [
                            {
                                '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
                                '''id''': ref['''id'''],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 123 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make all RNG-backed diffusers/torch ops deterministic so the pipeline
# tests below are reproducible run-to-run.
enable_full_determinism()
class lowerCAmelCase_ ( a_ , unittest.TestCase ):
    """Fast tests for KandinskyVaaControlnetImgaImgPipeline with tiny dummy models.

    NOTE(review): an automated rename has collapsed identifiers — every class
    attribute is bound to the same name (`__UpperCAmelCase`, so only the last
    assignment survives), every method to `__snake_case` (later definitions
    shadow earlier ones), and locals are assigned to `snake_case` but read
    back via `_snake_case` or the original names (`model`, `scheduler`, ...)
    which are undefined as written. Bodies are kept byte-identical here;
    confirm against the original test module before relying on behaviour.
    """
    __UpperCAmelCase = KandinskyVaaControlnetImgaImgPipeline
    __UpperCAmelCase = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    __UpperCAmelCase = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
    __UpperCAmelCase = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    __UpperCAmelCase = False
    @property
    def __snake_case ( self : Optional[Any] ):
        '''Hidden size used for the dummy text/image embedders.'''
        return 32
    @property
    def __snake_case ( self : Dict ):
        '''Time-embedding input dimension of the dummy UNet.'''
        return 32
    @property
    def __snake_case ( self : Any ):
        '''Alias returning the time-embedding input dimension.'''
        return self.time_input_dim
    @property
    def __snake_case ( self : Optional[Any] ):
        '''Time-embedding output dimension (4x the input dimension).'''
        return self.time_input_dim * 4
    @property
    def __snake_case ( self : Optional[int] ):
        '''Fixed constant returned for the dummy configuration.'''
        return 100
    @property
    def __snake_case ( self : Dict ):
        '''Build a tiny seeded UNet2DConditionModel for the fast test.'''
        torch.manual_seed(0 )
        snake_case : List[str] ={
            '''in_channels''': 8,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image_hint''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        snake_case : Optional[Any] =UNetaDConditionModel(**_snake_case )
        return model
    @property
    def __snake_case ( self : Dict ):
        '''Constructor kwargs for the tiny VQModel (movq) decoder.'''
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def __snake_case ( self : str ):
        '''Build the tiny seeded VQModel.'''
        torch.manual_seed(0 )
        snake_case : Dict =VQModel(**self.dummy_movq_kwargs )
        return model
    def __snake_case ( self : Any ):
        '''Assemble the pipeline components: dummy unet, DDIM scheduler, movq.'''
        snake_case : str =self.dummy_unet
        snake_case : str =self.dummy_movq
        snake_case : int ={
            '''num_train_timesteps''': 1_000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.0_0085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        snake_case : Optional[int] =DDIMScheduler(**_snake_case )
        snake_case : Union[str, Any] ={
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def __snake_case ( self : List[str], _snake_case : int, _snake_case : Dict=0 ):
        '''Create deterministic dummy inputs: embeds, init image, hint, generator.'''
        # NOTE(review): both parameters share the name `_snake_case` (device and
        # seed in the original) — a duplicate-argument error as written; confirm.
        snake_case : str =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(_snake_case ) ).to(_snake_case )
        snake_case : str =floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
            _snake_case )
        # create init_image
        snake_case : Union[str, Any] =floats_tensor((1, 3, 64, 64), rng=random.Random(_snake_case ) ).to(_snake_case )
        snake_case : Optional[int] =image.cpu().permute(0, 2, 3, 1 )[0]
        snake_case : int =Image.fromarray(np.uinta(_snake_case ) ).convert('''RGB''' ).resize((256, 256) )
        # create hint
        snake_case : int =floats_tensor((1, 3, 64, 64), rng=random.Random(_snake_case ) ).to(_snake_case )
        if str(_snake_case ).startswith('''mps''' ):
            snake_case : Dict =torch.manual_seed(_snake_case )
        else:
            snake_case : Union[str, Any] =torch.Generator(device=_snake_case ).manual_seed(_snake_case )
        snake_case : Tuple ={
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def __snake_case ( self : Optional[Any] ):
        '''Run the pipeline on CPU and compare an output slice to a reference.'''
        snake_case : int ='''cpu'''
        snake_case : Tuple =self.get_dummy_components()
        snake_case : Optional[int] =self.pipeline_class(**_snake_case )
        snake_case : Any =pipe.to(_snake_case )
        pipe.set_progress_bar_config(disable=_snake_case )
        snake_case : Optional[int] =pipe(**self.get_dummy_inputs(_snake_case ) )
        snake_case : Tuple =output.images
        # Calling with return_dict=False must yield the same image tensor.
        snake_case : Optional[int] =pipe(
            **self.get_dummy_inputs(_snake_case ), return_dict=_snake_case, )[0]
        snake_case : Union[str, Any] =image[0, -3:, -3:, -1]
        snake_case : Dict =image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case : str =np.array(
            [0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration test for the Kandinsky 2.2 controlnet img2img pipeline.

    NOTE(review): local results are assigned to `snake_case` but later read via
    other names (`init_image`, `hint`, `pipe_prior`, `pipeline`, ...), which
    are undefined as written — presumably an automated rename; bodies are kept
    byte-identical, confirm against the original test module.
    """
    def __snake_case ( self : Optional[Any] ):
        '''Release GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __snake_case ( self : int ):
        '''End-to-end: prior pipeline + controlnet img2img vs. a reference image.'''
        snake_case : Union[str, Any] =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
        snake_case : List[Any] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        snake_case : List[Any] =init_image.resize((512, 512) )
        snake_case : Union[str, Any] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''' )
        # Depth hint image -> float tensor in [0, 1], shape (1, C, H, W).
        snake_case : List[Any] =torch.from_numpy(np.array(_snake_case ) ).float() / 255.0
        snake_case : Optional[Any] =hint.permute(2, 0, 1 ).unsqueeze(0 )
        snake_case : Any ='''A robot, 4k photo'''
        snake_case : List[Any] =KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''', torch_dtype=torch.floataa )
        pipe_prior.to(_snake_case )
        snake_case : int =KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''', torch_dtype=torch.floataa )
        snake_case : List[str] =pipeline.to(_snake_case )
        pipeline.set_progress_bar_config(disable=_snake_case )
        snake_case : int =torch.Generator(device='''cpu''' ).manual_seed(0 )
        # NOTE(review): annotated tuple unpacking (`a , b : T = ...`) is not
        # valid Python syntax — confirm the intended two-name unpack.
        snake_case , snake_case : List[str] =pipe_prior(
            _snake_case, image=_snake_case, strength=0.85, generator=_snake_case, negative_prompt='''''', ).to_tuple()
        snake_case : List[str] =pipeline(
            image=_snake_case, image_embeds=_snake_case, negative_image_embeds=_snake_case, hint=_snake_case, generator=_snake_case, num_inference_steps=100, height=512, width=512, strength=0.5, output_type='''np''', )
        snake_case : List[str] =output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(_snake_case, _snake_case )
| 349 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Tests for MgpstrProcessor (ViT image processor + MGP-STR char tokenizer).

    NOTE(review): all test/helper methods share the name `UpperCAmelCase__`
    (later definitions shadow earlier ones) and locals are assigned to
    `lowercase__` but read back via other names — presumably an automated
    rename. Bodies are kept byte-identical; confirm against the original
    test module before relying on behaviour.
    """
    _a : int = ViTImageProcessor if is_vision_available() else None
    @property
    def UpperCAmelCase__( self ) -> Optional[int]:
        """Image-processor kwargs from the tester helper."""
        # NOTE(review): `self.image_processor_tester` is never assigned in this
        # class — confirm where it is expected to come from.
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCAmelCase__( self ) -> Dict:
        """Write a tiny char vocab and an image-processor config to a temp dir."""
        lowercase__ : Union[str, Any] = (3, 32, 128)
        lowercase__ : Tuple = tempfile.mkdtemp()
        # fmt: off
        lowercase__ : Optional[int] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        lowercase__ : Optional[int] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
        lowercase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(lowerCamelCase__ ) + """\n""" )
        lowercase__ : Union[str, Any] = {
            """do_normalize""": False,
            """do_resize""": True,
            """image_processor_type""": """ViTImageProcessor""",
            """resample""": 3,
            """size""": {"""height""": 32, """width""": 128},
        }
        lowercase__ : str = os.path.join(self.tmpdirname , lowerCamelCase__ )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(lowerCamelCase__ , lowerCamelCase__ )
    def UpperCAmelCase__( self , **lowerCamelCase__ ) -> Optional[Any]:
        """Load the char tokenizer saved in the temp dir."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
    def UpperCAmelCase__( self , **lowerCamelCase__ ) -> Union[str, Any]:
        """Load the image processor saved in the temp dir."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
    def UpperCAmelCase__( self ) -> Optional[Any]:
        """Remove the temp dir."""
        shutil.rmtree(self.tmpdirname )
    def UpperCAmelCase__( self ) -> str:
        """Create one random RGB PIL image fixture."""
        lowercase__ : List[Any] = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
        lowercase__ : Dict = Image.fromarray(np.moveaxis(lowerCamelCase__ , 0 , -1 ) )
        return image_input
    def UpperCAmelCase__( self ) -> Optional[Any]:
        """save_pretrained / from_pretrained round-trip with default settings."""
        lowercase__ : Optional[Any] = self.get_tokenizer()
        lowercase__ : List[str] = self.get_image_processor()
        lowercase__ : int = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
        processor.save_pretrained(self.tmpdirname )
        lowercase__ : List[str] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase__ )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , lowerCamelCase__ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
    def UpperCAmelCase__( self ) -> List[Any]:
        """Round-trip with overridden special tokens / processor kwargs."""
        lowercase__ : Tuple = self.get_tokenizer()
        lowercase__ : str = self.get_image_processor()
        lowercase__ : Tuple = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
        processor.save_pretrained(self.tmpdirname )
        lowercase__ : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowercase__ : List[str] = self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 )
        lowercase__ : Dict = MgpstrProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase__ , padding_value=1.0 )
        self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.char_tokenizer , lowerCamelCase__ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
    def UpperCAmelCase__( self ) -> List[Any]:
        """processor(images=...) must match the bare image processor output."""
        lowercase__ : Union[str, Any] = self.get_image_processor()
        lowercase__ : List[Any] = self.get_tokenizer()
        lowercase__ : Optional[int] = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
        lowercase__ : Dict = self.prepare_image_inputs()
        lowercase__ : int = image_processor(lowerCamelCase__ , return_tensors="""np""" )
        lowercase__ : int = processor(images=lowerCamelCase__ , return_tensors="""np""" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def UpperCAmelCase__( self ) -> Union[str, Any]:
        """processor(text=...) must match the bare tokenizer output."""
        lowercase__ : Any = self.get_image_processor()
        lowercase__ : List[str] = self.get_tokenizer()
        lowercase__ : List[Any] = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
        lowercase__ : Union[str, Any] = """test"""
        lowercase__ : Tuple = processor(text=lowerCamelCase__ )
        lowercase__ : Optional[int] = tokenizer(lowerCamelCase__ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def UpperCAmelCase__( self ) -> Optional[Any]:
        """Joint text+image call yields pixel_values and labels; empty call raises."""
        lowercase__ : Dict = self.get_image_processor()
        lowercase__ : Tuple = self.get_tokenizer()
        lowercase__ : Any = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
        lowercase__ : Union[str, Any] = """test"""
        lowercase__ : int = self.prepare_image_inputs()
        lowercase__ : Optional[int] = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
        # test if it raises when no input is passed
        with pytest.raises(lowerCamelCase__ ):
            processor()
    def UpperCAmelCase__( self ) -> List[str]:
        """char_decode must equal batch_decode with the spaces stripped."""
        lowercase__ : List[Any] = self.get_image_processor()
        lowercase__ : List[str] = self.get_tokenizer()
        lowercase__ : int = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
        lowercase__ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        lowercase__ : int = processor.char_decode(lowerCamelCase__ )
        lowercase__ : Dict = tokenizer.batch_decode(lowerCamelCase__ )
        lowercase__ : Any = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
        self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
    def UpperCAmelCase__( self ) -> List[str]:
        """Image-only call exposes the processor's model_input_names."""
        lowercase__ : Optional[int] = self.get_image_processor()
        lowercase__ : Optional[int] = self.get_tokenizer()
        lowercase__ : Any = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
        lowercase__ : List[Any] = None
        lowercase__ : Dict = self.prepare_image_inputs()
        lowercase__ : List[Any] = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
    def UpperCAmelCase__( self ) -> Union[str, Any]:
        """batch_decode over (char, bpe, wp) logits returns the expected keys."""
        lowercase__ : Union[str, Any] = self.get_image_processor()
        lowercase__ : Optional[int] = self.get_tokenizer()
        lowercase__ : Dict = MgpstrProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
        lowercase__ : str = torch.randn(1 , 27 , 38 )
        lowercase__ : int = torch.randn(1 , 27 , 5_0257 )
        lowercase__ : List[Any] = torch.randn(1 , 27 , 3_0522 )
        lowercase__ : List[Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
# Module-level logger for this pipeline module.
__snake_case = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    """Video classification pipeline: samples frames from a video with decord
    and classifies the clip with a video-classification model.

    NOTE(review): locals are assigned to `lowercase__` but read back via other
    names (`videoreader`, `model_inputs`, ...), and the decorator/base-class
    name `__UpperCAmelCase` is not defined in this chunk — presumably an
    automated rename. Bodies are kept byte-identical; confirm against the
    original module.
    """
    def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]:
        """Require the decord backend and restrict to supported model types."""
        super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
        requires_backends(self , """decord""" )
        self.check_model_type(lowerCamelCase__ )
    def UpperCAmelCase__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None ) -> Optional[int]:
        """Split kwargs into preprocess (num_frames, frame_sampling_rate) and
        postprocess (top_k) parameter dicts for the base Pipeline machinery."""
        lowercase__ : int = {}
        if frame_sampling_rate is not None:
            lowercase__ : Optional[Any] = frame_sampling_rate
        if num_frames is not None:
            lowercase__ : List[str] = num_frames
        lowercase__ : str = {}
        if top_k is not None:
            lowercase__ : Any = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]:
        """Classify one video (local path or URL) or a batch of videos."""
        return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=1 ) -> Optional[Any]:
        """Fetch the video, sample evenly spaced frames with the given sampling
        rate, and run the image processor over them."""
        if num_frames is None:
            lowercase__ : Dict = self.model.config.num_frames
        if video.startswith("""http://""" ) or video.startswith("""https://""" ):
            # Remote videos are downloaded into memory before decoding.
            lowercase__ : Tuple = BytesIO(requests.get(lowerCamelCase__ ).content )
        lowercase__ : int = VideoReader(lowerCamelCase__ )
        videoreader.seek(0 )
        lowercase__ : Optional[int] = 0
        lowercase__ : Optional[int] = num_frames * frame_sampling_rate - 1
        lowercase__ : Optional[Any] = np.linspace(lowerCamelCase__ , lowerCamelCase__ , num=lowerCamelCase__ , dtype=np.intaa )
        lowercase__ : Union[str, Any] = videoreader.get_batch(lowerCamelCase__ ).asnumpy()
        lowercase__ : Optional[int] = list(lowerCamelCase__ )
        lowercase__ : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=self.framework )
        return model_inputs
    def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[Any]:
        """Forward pass on the preprocessed frames."""
        lowercase__ : int = self.model(**lowerCamelCase__ )
        return model_outputs
    def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=5 ) -> Any:
        """Softmax over the logits and return the top_k (label, score) pairs."""
        if top_k > self.model.config.num_labels:
            lowercase__ : int = self.model.config.num_labels
        if self.framework == "pt":
            lowercase__ : List[Any] = model_outputs.logits.softmax(-1 )[0]
            lowercase__ , lowercase__ : int = probs.topk(lowerCamelCase__ )
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''' )
        lowercase__ : str = scores.tolist()
        lowercase__ : List[str] = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase__ , lowerCamelCase__ )]
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCamelCase_ ( unittest.TestCase ):
    """Tokenizer loading robustness tests: cached loads under Hub outages and
    legacy single-file loading.

    NOTE(review): several locals are repeatedly assigned to the same name
    `lowercase` (originally distinct names / mock attributes such as
    `response_mock.status_code`) and read back via `lowerCAmelCase__` —
    presumably an automated rename. Bodies are kept byte-identical; confirm
    against the original test module.
    """
    def SCREAMING_SNAKE_CASE( self :Tuple ) ->int:
        """A cached slow tokenizer must still load when the Hub returns 500."""
        # A mock response for an HTTP head request to emulate server down
        lowercase = mock.Mock()
        lowercase = 500
        lowercase = {}
        lowercase = HTTPError
        lowercase = {}
        # Download this model to make sure it's in the cache.
        lowercase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
            lowercase = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def SCREAMING_SNAKE_CASE( self :Dict ) ->str:
        """Same as above for a fast (tokenizers-backed) tokenizer."""
        # A mock response for an HTTP head request to emulate server down
        lowercase = mock.Mock()
        lowercase = 500
        lowercase = {}
        lowercase = HTTPError
        lowercase = {}
        # Download this model to make sure it's in the cache.
        lowercase = GPTaTokenizerFast.from_pretrained("gpt2" )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request" , return_value=lowerCAmelCase__ ) as mock_head:
            lowercase = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def SCREAMING_SNAKE_CASE( self :List[str] ) ->Optional[int]:
        """Legacy loading from a single downloaded vocab/tokenizer file."""
        # This test is for deprecated behavior and can be removed in v5
        try:
            lowercase = tempfile.mktemp()
            with open(lowerCAmelCase__ , "wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , lowerCAmelCase__ )
            lowercase = AlbertTokenizer.from_pretrained(lowerCAmelCase__ )
        finally:
            os.remove(lowerCAmelCase__ )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json" ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json" , "wb" ) as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , lowerCAmelCase__ )
            lowercase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size , 1000 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json" )
    def SCREAMING_SNAKE_CASE( self :str ) ->Optional[Any]:
        """Legacy loading directly from a resolved file URL."""
        # This test is for deprecated behavior and can be removed in v5
        lowercase = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase ):
    """Hub round-trip tests: push tokenizers (slow, org-scoped, custom dynamic)
    to the staging Hub and load them back.

    NOTE(review): locals are assigned to `lowercase` but frequently read back
    via other names (`tokenizer`, `new_tokenizer`, ...) and via
    `lowerCAmelCase__` — presumably an automated rename. Bodies are kept
    byte-identical; confirm against the original test module.
    """
    UpperCamelCase : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
    @classmethod
    def SCREAMING_SNAKE_CASE( cls :Union[str, Any] ) ->List[Any]:
        """Register the staging auth token once for the whole class."""
        lowercase = TOKEN
        HfFolder.save_token(lowerCAmelCase__ )
    @classmethod
    def SCREAMING_SNAKE_CASE( cls :Dict ) ->int:
        """Best-effort cleanup of the repos created by these tests."""
        try:
            delete_repo(token=cls._token , repo_id="test-tokenizer" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
        except HTTPError:
            pass
    def SCREAMING_SNAKE_CASE( self :Any ) ->List[Any]:
        """Push a slow tokenizer via push_to_hub and via save_pretrained."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase = os.path.join(lowerCAmelCase__ , "vocab.txt" )
            with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowercase = BertTokenizer(lowerCAmelCase__ )
        tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
        lowercase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-tokenizer" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(lowerCAmelCase__ , repo_id="test-tokenizer" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
        lowercase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
    def SCREAMING_SNAKE_CASE( self :List[Any] ) ->Optional[Any]:
        """Same round-trip for an org-scoped repo."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase = os.path.join(lowerCAmelCase__ , "vocab.txt" )
            with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowercase = BertTokenizer(lowerCAmelCase__ )
        tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
        lowercase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                lowerCAmelCase__ , repo_id="valid_org/test-tokenizer-org" , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
        lowercase = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
        self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
    @require_tokenizers
    def SCREAMING_SNAKE_CASE( self :List[str] ) ->Any:
        """Push custom (dynamic-module) slow and fast tokenizers and reload them
        with trust_remote_code."""
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase = os.path.join(lowerCAmelCase__ , "vocab.txt" )
            with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowercase = CustomTokenizer(lowerCAmelCase__ )
        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
        lowercase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=lowerCAmelCase__ )
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowercase = os.path.join(lowerCAmelCase__ , "vocab.txt" )
            with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            lowercase = BertTokenizerFast.from_pretrained(lowerCAmelCase__ )
            bert_tokenizer.save_pretrained(lowerCAmelCase__ )
            lowercase = CustomTokenizerFast.from_pretrained(lowerCAmelCase__ )
        tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
        lowercase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=lowerCAmelCase__ )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
        lowercase = AutoTokenizer.from_pretrained(
            F'''{USER}/test-dynamic-tokenizer''' , use_fast=lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__ )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class UpperCamelCase_ ( unittest.TestCase ):
    """Unit tests for the tokenizer `Trie` (add / split / cut_text).

    NOTE(review): each test assigns its Trie to `lowercase` but then calls
    methods on `trie` — presumably an automated rename. Bodies are kept
    byte-identical; confirm against the original test module.
    """
    def SCREAMING_SNAKE_CASE( self :Union[str, Any] ) ->Dict:
        """Adding a prefix of an existing entry keeps both terminal markers."""
        lowercase = Trie()
        trie.add("Hello 友達" )
        self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
        trie.add("Hello" )
        trie.data
        self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
    def SCREAMING_SNAKE_CASE( self :List[str] ) ->List[Any]:
        """Splitting with no tokens is a no-op; added tokens split the text."""
        lowercase = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
        trie.add("[CLS]" )
        trie.add("extra_id_1" )
        trie.add("extra_id_100" )
        self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
    def SCREAMING_SNAKE_CASE( self :List[Any] ) ->Optional[Any]:
        """A single token splits at the start and at the end of the text."""
        lowercase = Trie()
        trie.add("A" )
        self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
        self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
    def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->List[Any]:
        """A partial-token entry must not break matching of the full token."""
        lowercase = Trie()
        trie.add("TOKEN]" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
    def SCREAMING_SNAKE_CASE( self :Optional[int] ) ->Dict:
        """Single-char tokens must not interfere with longer tokens."""
        lowercase = Trie()
        trie.add("A" )
        trie.add("P" )
        trie.add("[SPECIAL_TOKEN]" )
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
    def SCREAMING_SNAKE_CASE( self :Optional[Any] ) ->Union[str, Any]:
        """The longer match wins over an overlapping shorter token."""
        lowercase = Trie()
        trie.add("AB" )
        trie.add("B" )
        trie.add("C" )
        self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
    def SCREAMING_SNAKE_CASE( self :Optional[int] ) ->int:
        """Overlapping matches resolve to a consistent non-overlapping split."""
        lowercase = Trie()
        trie.add("ABC" )
        trie.add("B" )
        trie.add("CD" )
        self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
    def SCREAMING_SNAKE_CASE( self :Optional[int] ) ->Union[str, Any]:
        """cut_text tolerates malformed offsets but still returns valid parts."""
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        lowercase = Trie()
        lowercase = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(lowerCAmelCase__ , ["AB", "C"] )
| 441 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
    """Config tester that additionally checks MobileNetV2-specific attributes.

    NOTE(review): the class name and base had been collapsed by obfuscation
    (`UpperCamelCase_ ( __a )`, with `__a` undefined); the name is restored
    from the call site in the model test class below and the base from the
    `ConfigTester` import at the top of the file.
    """

    def SCREAMING_SNAKE_CASE( self ):
        """Instantiate the config and assert its MobileNetV2-specific attributes exist."""
        config = self.config_class(**self.inputs_dict )  # fix: was bound to `lowercase` but read as `lowerCAmelCase__`
        self.parent.assertTrue(hasattr(config , "tf_padding" ) )
        self.parent.assertTrue(hasattr(config , "depth_multiplier" ) )


UpperCamelCase_ = MobileNetVaConfigTester  # obfuscated alias kept for compatibility
class MobileNetVaModelTester :
    """Builds MobileNetV2 configs and dummy inputs, and runs output-shape checks
    for each task head. Consumed by the model test class below.

    NOTE(review): the original ``__init__`` declared every parameter as
    ``lowerCAmelCase__`` (duplicate parameter names are a SyntaxError) and bound
    each value to the throwaway local ``lowercase`` instead of an attribute.
    Parameter, attribute and method names are recovered from how the rest of
    this class and the test class below read them.
    """

    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # Final hidden width scales with depth_multiplier unless finegrained output keeps it fixed.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs( self ):
        """Create dummy pixel values (and labels when use_labels) plus a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config( self ):
        """Build a MobileNetVaConfig from this tester's hyper-parameters."""
        return MobileNetVaConfig(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        """Base model outputs have shape (batch, hidden, H/stride, W/stride) and a pooled vector."""
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        self.parent.assertEqual(
            result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        """Classification head produces (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        """Segmentation head produces (batch, num_labels, H/stride, W/stride) logits, with and without labels."""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def prepare_config_and_inputs_for_common( self ):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model tests for MobileNetV2.

    NOTE(review): the bases were collapsed to ``( __a , __a , ... )`` and the
    class attributes to ``UpperCamelCase``; the restored names are the ones the
    common mixins (imported at the top of the file) actually read. Test method
    names (all collapsed to SCREAMING_SNAKE_CASE, shadowing each other) are
    restored so unittest collects them all.
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp( self ):
        # fix: both helpers were constructed but bound to the dead local `lowercase`,
        # and the config-tester kwargs referenced the undefined `lowerCAmelCase__`.
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )

    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings" )
    def test_model_common_attributes( self ):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions" )
    def test_attention_outputs( self ):
        pass

    def test_forward_signature( self ):
        """forward() must accept `pixel_values` as its first argument."""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_hidden_states_output( self ):
        """16 hidden states are returned, whether requested via kwargs or config."""

        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states ) , expected_num_stages )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )  # fix: loaded model was bound to `lowercase` but asserted as `lowerCAmelCase__`
            self.assertIsNotNone(model )
def prepare_img( ):
    '''Load the COCO cats fixture image used by the integration tests below.'''
    # NOTE(review): restored name — the integration tests call `prepare_img()`,
    # while the def had been renamed to `__snake_case` and the loaded image was
    # bound to the dead local `lowercase` (the `return image` raised NameError).
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


__snake_case = prepare_img  # obfuscated alias kept for compatibility
@require_torch
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
    """Slow integration tests running real MobileNetV2 checkpoints on a sample image.

    NOTE(review): locals were garbled (`lowercase` assignments read back under
    other names, `torch_device` replaced by the undefined `lowerCAmelCase__`);
    names are restored. The cached property name is proven by the read
    `self.default_image_processor` below.
    """

    @cached_property
    def default_image_processor( self ):
        """Image processor for the classification checkpoint (None without vision deps)."""
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224" ) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head( self ):
        """Classification logits on the COCO cats image match reference values."""
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )

    @slow
    def test_inference_semantic_segmentation( self ):
        """Segmentation logits from DeepLabV3+MobileNetV2 match reference values."""
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
| 441 | 1 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted :
    """A graph whose edges carry transition probabilities, used as a Markov
    chain: ``transition`` samples a random successor of a node.

    NOTE(review): all four methods had been collapsed to the single name
    ``lowerCamelCase__`` (so only the last survived) and every attribute write
    was replaced by the dead binding ``A_ = ...`` — ``self.connections`` was
    never populated. Names are restored from the calls inside this class
    (``self.add_node``) and in the walk helper below.
    """

    def __init__( self ) -> None:
        # node -> {destination: probability}
        self.connections = {}

    def add_node( self , node : str ) -> None:
        """Register *node* with an (initially empty) adjacency map."""
        self.connections[node] = {}

    def add_transition_probability( self , node_a : str , node_b : str , probability : float ) -> None:
        """Record the probability of moving from *node_a* to *node_b*, creating
        either node on first sight."""
        if node_a not in self.connections:
            self.add_node(node_a )
        if node_b not in self.connections:
            self.add_node(node_b )
        self.connections[node_a][node_b] = probability

    def get_nodes( self ) -> list[str]:
        """Return the names of all registered nodes."""
        return list(self.connections )

    def transition( self , node : str ) -> str:
        """Sample the next node after *node* according to the stored
        probabilities; returns "" when they do not cover the drawn value."""
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


__lowerCAmelCase = MarkovChainGraphUndirectedUnweighted  # obfuscated alias kept for compatibility
def A_ (start , transitions , steps ):
    '''Perform a random walk over a Markov chain.

    Args:
        start: node where the walk begins.
        transitions: iterable of ``(source, destination, probability)`` triples.
        steps: number of transitions to perform.

    Returns:
        ``collections.Counter`` mapping each node to its visit count (every
        node starts with one count from the initial ``Counter`` over the nodes).
    '''
    # NOTE(review): the original signature declared all three parameters as
    # `__a` (duplicate parameter names are a SyntaxError) and bound every local
    # to `A_`; the body is reconstructed from the surviving call structure —
    # confirm the parameter order against the upstream module.
    graph = MarkovChainGraphUndirectedUnweighted()
    for source, destination, probability in transitions:
        graph.add_transition_probability(source , destination , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
# Run the module's doctests when it is executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 719 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
# Module-level RNG shared by the dummy-data helpers below; the helper reads it
# under the name `global_rng`, so that name is defined here.
# fix: dropped the `Union[str, Any]` annotation — `typing.Union` is not
# imported in this module, so the annotated assignment raised NameError.
global_rng = random.Random()
UpperCamelCase_ = global_rng  # original (obfuscated) binding kept for compatibility
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    '''Create a ``shape[0] x shape[1]`` nested list of random floats in ``[0, scale)``.

    Args:
        shape: ``(rows, cols)`` of the matrix to generate.
        scale: exclusive upper bound for every value.
        rng: optional ``random.Random``; defaults to the module-level ``global_rng``.
        name: unused; kept for signature parity with similar test helpers.
    '''
    # NOTE(review): the original def collapsed all four parameters to `__a`
    # (duplicate parameter names are a SyntaxError) and bound results to the
    # dead local `A_`; the body's own reads (`global_rng`, `values`) and the
    # call sites below (`floats_list`) identify the intended names.
    if rng is None:
        rng = global_rng

    values = []
    for _batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values


A_ = floats_list  # original (obfuscated) binding kept for compatibility
class TvltFeatureExtractionTester ( unittest.TestCase ):
    """Holds the hyper-parameters for the TVLT feature-extraction tests and
    builds the extractor's init kwargs and dummy audio inputs from them.

    NOTE(review): the original ``__init__`` declared every parameter as
    ``_snake_case`` (duplicate names are a SyntaxError) and bound each value to
    the dead local ``A_`` instead of an attribute. Names are recovered from the
    attribute reads below; the class name is proven by the construction site in
    the test class (``TvltFeatureExtractionTester(self)``).
    """

    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2_000 , spectrogram_length=2_048 , feature_size=128 , num_audio_channels=1 , hop_length=512 , chunk_length=30 , sampling_rate=44_100 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Length step used to build a ragged batch ranging from min to max length.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict( self ):
        """Return the kwargs used to instantiate TvltFeatureExtractor."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        """Build a batch of dummy speech inputs (lists of floats, or numpy
        arrays when *numpify* is set)."""

        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs


__lowerCAmelCase = TvltFeatureExtractionTester  # obfuscated alias kept for compatibility
@require_torch
@require_torchaudio
class __lowerCAmelCase ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """Tests for TvltFeatureExtractor: attribute checks, (de)serialisation
    round-trips, calling conventions and a known-value integration test.

    NOTE(review): the mixin base had been garbled to the undefined name
    ``_lowercase`` (restored from the import at the top of the file), the
    class attribute to ``snake_case`` (the mixin reads
    ``feature_extraction_class``), method names were collapsed, and locals were
    bound to the dead name ``A_`` while being read under their real names.
    """

    feature_extraction_class = TvltFeatureExtractor

    def setUp( self ) -> None:
        self.feat_extract_tester = TvltFeatureExtractionTester(self )

    def test_feat_extract_properties( self ) -> None:
        """The extractor exposes all of its configuration attributes."""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feature_extractor , "spectrogram_length" ) )
        self.assertTrue(hasattr(feature_extractor , "feature_size" ) )
        self.assertTrue(hasattr(feature_extractor , "num_audio_channels" ) )
        self.assertTrue(hasattr(feature_extractor , "hop_length" ) )
        self.assertTrue(hasattr(feature_extractor , "chunk_length" ) )
        self.assertTrue(hasattr(feature_extractor , "sampling_rate" ) )

    def test_feat_extract_from_and_save_pretrained( self ) -> None:
        """save_pretrained/from_pretrained round-trip preserves the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        # The mel filter bank is a float array: compare it numerically, then the rest exactly.
        mel_first = dict_first.pop("mel_filters" )
        mel_second = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )

    def test_feat_extract_to_json_file( self ) -> None:
        """to_json_file/from_json_file round-trip preserves the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters" )
        mel_second = dict_second.pop("mel_filters" )
        self.assertTrue(np.allclose(mel_first , mel_second ) )
        self.assertEqual(dict_first , dict_second )

    def test_call( self ) -> None:
        """Extraction handles unbatched, batched, masked and pre-stacked inputs."""
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]

        # Test not batched input
        # NOTE(review): `num_channels` is read below while the properties test
        # checks `num_audio_channels` — confirm the attribute name upstream.
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=44_100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="np" , sampling_rate=44_100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors="np" , sampling_rate=44_100 , mask_audio=True ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors="np" , sampling_rate=44_100 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )

    def _load_datasamples( self , num_samples ):
        """Load *num_samples* decoded speech arrays from the dummy LibriSpeech dataset."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration( self ) -> None:
        """A known input produces the reference spectrogram values."""
        input_speech = self._load_datasamples(1 )
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors="pt" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )  # fix: deprecated assertEquals
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
| 482 | 0 |
def is_palindrome(head ):
    '''Return True if the linked list starting at *head* is a palindrome.

    Runs in O(n) time / O(1) extra space: split the list at the midpoint,
    reverse the second half in place, then compare the two halves.

    NOTE(review): all three palindrome functions in this module had been
    collapsed to the single name `_UpperCAmelCase` (so only the last survived)
    and every local was bound to `a_` while being read under its real name;
    both are repaired here.
    '''
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_traversal(head ):
    '''Return True if the linked list starting at *head* is a palindrome.

    O(n) time / O(n) space variant: walk to the midpoint with fast/slow
    pointers, push the second half on a stack, then pop while re-walking
    from the head.
    '''
    # fix: locals were bound to the dead name `a_` while being read as
    # `fast`/`slow`/`stack`/`cur`, and the initial `cur = head` binding was lost.
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    fast = slow = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    cur = head
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head ):
    '''Return True if the linked list starting at *head* is a palindrome.

    Dictionary variant: record the positions of every value, then check that
    each value's position list is symmetric around the middle (positions i and
    len-1-i must sum to the last index), allowing at most one odd-count value.
    '''
    # fix: locals were bound to the dead name `a_` while being read as
    # `d`/`pos`/`checksum`/`middle`/`step`.
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True


_UpperCAmelCase = is_palindrome_dict  # obfuscated alias kept for compatibility
| 540 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    '''Delete fairseq bookkeeping keys from *state_dict* in place.

    Missing keys are ignored (``dict.pop`` with a default).

    NOTE(review): the three helpers in this script were all collapsed to the
    name `_UpperCAmelCase`; the real names, referenced by the converter and the
    __main__ block, are restored.
    '''
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )  # fix: was `state_dict.pop(a__ , a__)` with undefined names
def make_linear_from_emb(emb ):
    '''Build a bias-free ``nn.Linear`` whose weight tensor is shared with the
    embedding *emb* (used to tie the LM head to the token embeddings).'''
    vocab_size, emb_size = emb.weight.shape  # fix: both sizes were bound to the same dead name `a_`
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path ):
    '''Load a fairseq M2M100 checkpoint from *checkpoint_path* and convert it
    into a ``MaMaaaForConditionalGeneration`` model.

    NOTE(review): every local was bound to the dead name `a_` while being read
    under its real name (`mam_aaa`, `args`, `state_dict`, ...); the function
    name is restored from the call in the __main__ block below.
    '''
    mam_aaa = torch.load(checkpoint_path , map_location="cpu" )
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size , max_position_embeddings=1_0_2_4 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
    # Tie the shared embedding to the decoder token embedding before loading.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config )
    model.model.load_state_dict(state_dict , strict=False )
    model.lm_head = make_linear_from_emb(model.model.shared )
    return model


_UpperCAmelCase = convert_fairseq_mamaaa_checkpoint_from_disk  # obfuscated alias kept for compatibility
if __name__ == "__main__":
    # fix: parser, args and model were all bound to the same name
    # `__snake_case` (so `parser.parse_args()` raised NameError), and the
    # attribute access was garbled as `args.fairseq_pathß` (stray "ß").
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 540 | 1 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCAmelCase :
    """Mixin of (de)serialisation tests shared by feature-extractor test classes.

    Concrete subclasses must set ``feature_extraction_class`` and provide a
    ``feat_extract_dict`` of init kwargs.

    NOTE(review): the class attribute had been collapsed to ``__lowercase``
    and the four test methods to ``_lowerCAmelCase`` (shadowing each other);
    names are restored from the attribute reads in the method bodies. The
    trailing dataset residue fused onto the last line (a SyntaxError) is
    dropped.
    """

    feature_extraction_class = None  # set by the concrete test class

    def test_feat_extract_to_json_string( self ):
        """Every init kwarg round-trips through to_json_string()."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )  # fix: expected value was the undefined `UpperCamelCase__`

    def test_feat_extract_to_json_file( self ):
        """to_json_file/from_json_file round-trip preserves the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def test_feat_extract_from_and_save_pretrained( self ):
        """save_pretrained/from_pretrained round-trip preserves the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def test_init_without_params( self ):
        """The extractor can be constructed with all-default arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
# NOTE(review): the module-level constants were all collapsed to `__lowercase`
# (each rebinding clobbered the previous one) while the tokenizer class below
# reads `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and `logger`; the real names are
# restored here.

# File names the tokenizer saves/loads for its vocabulary and merges.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

# Maximum input lengths (positional embedding sizes) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1_0_2_4,
    "facebook/bart-large": 1_0_2_4,
    "facebook/bart-large-mnli": 1_0_2_4,
    "facebook/bart-large-cnn": 1_0_2_4,
    "facebook/bart-large-xsum": 1_0_2_4,
    "yjernite/bart_eli5": 1_0_2_4,
}
class lowerCAmelCase ( PreTrainedTokenizerFast ):
    """A "fast" BART tokenizer (backed by the HuggingFace *tokenizers* library),
    derived from the GPT-2 byte-level BPE tokenizer.

    NOTE(review): the base class was garbled to the undefined name ``a``
    (restored from the ``PreTrainedTokenizerFast`` import at the top of the
    file), the class attributes to ``__lowercase`` and the method names to
    ``_lowerCAmelCase``; the restored names are the ones the fast-tokenizer
    machinery reads (``vocab_files_names`` etc.) and, for the property, the
    one its own ``@mask_token.setter`` decorator requires. The original
    ``__init__`` declared every parameter as ``UpperCamelCase__`` (duplicate
    names are a SyntaxError); parameters are restored from the defaults and
    the kwargs forwarded to ``super().__init__``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        # Keep the backend pre-tokenizer's add_prefix_space in sync with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )

            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )

    @property
    def mask_token( self ) -> str:
        """The mask token; logs an error and returns None when it is unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ):
        """Make the mask token lstrip-able so it also consumes the preceding space."""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """Save the backend tokenizer model's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """``<s> A </s>`` for a single sequence, ``<s> A </s> </s> B </s>`` for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """BART does not use token type ids: return a zero list of the full length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
"""Lazy import structure for the XLNet model family.

Fix: every module-level name was assigned to the single identifier ``__UpperCamelCase``, so
each assignment overwrote the previous one and ``_import_structure`` — referenced by the
``_LazyModule`` call at the bottom — was never defined, raising ``NameError`` at import time.
Restore the canonical transformers lazy-module pattern.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Mapping of submodule name -> public names it exports; optional backends are added below.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class lowerCAmelCase_ :
    """Builds a tiny ``MBartConfig`` plus dummy TF inputs for the TFMBart model tests.

    Fixes: the three class attributes and the two methods all shared one obfuscated name
    (each definition shadowing the previous one) and ``__init__`` declared every parameter
    as ``_snake_case`` (duplicate argument names, a SyntaxError). Names are restored to the
    ones the sibling test class actually calls (``prepare_config_and_inputs_for_common``,
    ``check_decoder_model_past_large_inputs``) and that the methods themselves read
    (``self.config_cls``, ``self.config_updates``).
    """

    config_cls = MBartConfig
    config_updates = {}
    hidden_act = 'gelu'

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a small config plus an inputs dict with all masks filled in."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        # NOTE(review): `prepare_mbart_inputs_dict` must exist at module scope — the helper in
        # this file was renamed to `_a` by the obfuscating refactor; verify.
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Run a cached forward pass on a single-example batch through the decoder."""
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
        # NOTE(review): the remainder of this check (comparing cached vs. uncached decoding)
        # is missing from this copy of the file — restore it from the upstream test suite.
def _a(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for the MBart TF tests and bundle them into a dict.

    Fixes: all eight parameters shared the single name ``lowerCamelCase_`` (duplicate argument
    names, a SyntaxError); every computed default was assigned to a throwaway local instead of
    the parameter, so the returned dict always contained the original ``None`` values; and the
    dtype identifier was mangled (``tf.inta`` for ``tf.int8``).
    """
    if attention_mask is None:
        # Mask out padding positions.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # The first decoder token (decoder_start_token) is always attended to.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class lowerCAmelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common + pipeline test suite for the TFMBart models.

    Fixes: the six class attributes and the four methods all shared one obfuscated name, so
    only the last of each survived, and the mixin base classes were reduced to the undefined
    name ``a_``. Attribute and method names are restored to the ones the mixins and the
    methods' own bodies reference; the bases are restored to the mixins imported above.
    """

    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFMBartForConditionalGeneration,
            'feature-extraction': TFMBartModel,
            'summarization': TFMBartForConditionalGeneration,
            'text2text-generation': TFMBartForConditionalGeneration,
            'translation': TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): the original bool flags were unlabelled — these names (encoder/decoder
    # model, no pruning, no onnx) follow the upstream MBart test; confirm against it.
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip every pipeline test except feature extraction (the others error in TF)."""
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True
        return False

    def setUp(self):
        # NOTE(review): `TFMBartModelTester` must exist at module scope — the tester class in
        # this module was renamed by the obfuscating refactor; verify.
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration test: en->ro translation with ``facebook/mbart-large-en-ro``.

    Fixes: the three class attributes shared one obfuscated name and the four methods were
    all named ``__snake_case`` (shadowing each other), while the bodies reference them by
    their canonical names (``self.src_text``, ``self.tokenizer``, ``self.model``,
    ``self.translate_src_text``, ``self._assert_generated_batch_equal_expected``); locals
    were assigned to throwaway names and the return used the undefined canonical name.
    """

    src_text = [
        ' UN Chief Says There Is No Military Solution in Syria',
    ]
    expected_text = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
    ]
    model_name = 'facebook/mbart-large-en-ro'

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        # NOTE(review): the import above spells this `TFAutoModelForSeqaSeqLM` (mangled from
        # `TFAutoModelForSeq2SeqLM`); kept consistent with the file's import — verify both.
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        """Tokenize the source batch, generate with beam search and decode the result."""
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors='''tf''')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
| 349 | 0 |
"""simple docstring"""
import argparse
import os
import re
# Root of the package whose __init__ files are sorted, followed by the regexes used to parse
# `_import_structure` blocks.
# NOTE(review): every constant below is bound to the same name `a__`, so each assignment
# overwrites the previous one and only the final regex survives at module scope. The helper
# functions in this file expect distinct names (the path constant plus `_re_indent`,
# `_re_direct_key`, `_re_indirect_key`, `_re_strip_line`, `_re_bracket_content`) — restore them.
a__ : Optional[int] = """src/diffusers"""
# Pattern that captures the indentation (leading whitespace) of a line in group 0.
a__ : str = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":` and puts `key` in group 0.
a__ : Tuple = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a__ : Any = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches a full `"key",` line and puts `key` in group 0.
a__ : int = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a__ : Tuple = re.compile(r"""\[([^\]]+)\]""")
def A__ ( line ):
    """Return the leading whitespace of *line*, or an empty string for blank lines.

    Fix: the original referenced ``_re_indent``, which is never defined in this module (all
    module constants were bound to the single name ``a__``); the pattern is inlined so the
    helper is self-contained.
    """
    found = re.search(r"^(\s*)\S", line )
    return "" if found is None else found.groups()[0]
def A__ ( code, indent_level="", start_prompt=None, end_prompt=None ):
    """Split *code* into blocks delimited by lines at exactly *indent_level*.

    Lines before the first line starting with *start_prompt* form one prefix block; parsing
    stops at the first line starting with *end_prompt*, with the remainder returned as one
    trailing block.

    Fixes: all four parameters were named ``__lowerCamelCase`` (duplicate argument names, a
    SyntaxError), and the helper ``get_indent`` referenced by the original does not exist at
    module scope; a local equivalent is used instead.
    """
    def _indent(line ):
        # Leading whitespace of the first non-blank character; "" for blank lines.
        found = re.search(r"^(\s*)\S", line )
        return "" if found is None else found.groups()[0]

    index = 0
    lines = code.split("\n" )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ["\n".join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and _indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and _indent(current_block[-1] ).startswith(indent_level + " " ):
                # Closing line of a nested section: it ends the current block.
                current_block.append(lines[index] )
                blocks.append("\n".join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append("\n".join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append("\n".join(lines[index:] ) )
    return blocks
def A__ ( key ):
    """Wrap a key function so comparisons ignore case and underscores.

    Fix: the inner closure called ``key`` while the outer parameter was named
    ``__lowerCamelCase``, so every invocation raised ``NameError``.
    """
    def _inner(obj ):
        return key(obj ).lower().replace('_', '' )
    return _inner
def A__ ( objects, key=None ):
    """Sort *objects*: ALL-CAPS constants first, then Capitalized classes, then lowercase
    functions — each group sorted case-insensitively, ignoring underscores.

    Fixes: both parameters were named ``__lowerCamelCase`` (duplicate argument names, a
    SyntaxError); the fallback key returned the undefined name ``x``; every predicate was
    applied to the whole list instead of each element; and ``ignore_underscore`` does not
    exist at module scope (it was renamed away), so the normalisation is inlined.
    """
    # If no key is provided, we use a noop.
    def noop(obj ):
        return obj

    if key is None:
        key = noop

    def _ignoring_underscore(obj ):
        return key(obj ).lower().replace('_', '' )

    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    return (
        sorted(constants, key=_ignoring_underscore )
        + sorted(classes, key=_ignoring_underscore )
        + sorted(functions, key=_ignoring_underscore )
    )
def A__ ( import_statement ):
    """Sort the bracketed name list(s) inside a single ``_import_structure`` statement,
    handling the one-line, three-line and many-line layouts.

    Fixes: the replacement callback read the undefined name ``match`` instead of its
    parameter; the per-line key extraction searched the whole statement instead of each
    line; the sort-key lambda referenced the undefined name ``x``; and the helpers
    ``sort_objects``/``get_indent`` plus the regex constants do not exist at module scope
    (every module-level name was bound to ``a__``), so local equivalents are used.
    """
    _re_bracket_content = re.compile(r"\[([^\]]+)\]" )
    _re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$" )

    def _get_indent(line ):
        found = re.search(r"^(\s*)\S", line )
        return "" if found is None else found.groups()[0]

    def _sort_objects(objects, key=None ):
        # Constants first, then classes, then functions; each sorted ignoring case/underscores.
        if key is None:
            key = lambda obj: obj
        norm_key = lambda obj: key(obj ).lower().replace('_', '' )
        constants = [obj for obj in objects if key(obj ).isupper()]
        classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
        functions = [obj for obj in objects if not key(obj )[0].isupper()]
        return sorted(constants, key=norm_key ) + sorted(classes, key=norm_key ) + sorted(functions, key=norm_key )

    # This inner function sorts imports between [ ].
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F'''[{imports}]'''
        keys = [part.strip().replace('"', '' ) for part in imports.split(',' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'''"{k}"''' for k in _sort_objects(keys )] ) + "]"

    lines = import_statement.split('\n' )
    if len(lines ) > 3:
        # One name per line between the brackets; one or two wrapper lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_keys = _sort_objects(keys, key=lambda pair: pair[1] )
        sorted_lines = [lines[pair[0] + idx] for pair in sorted_keys]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # All names on the single middle line.
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1] )
        else:
            keys = [part.strip().replace('"', '' ) for part in lines[1].split(',' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = _get_indent(lines[1] ) + ", ".join([F'''"{k}"''' for k in _sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line.
        return _re_bracket_content.sub(_replace, import_statement )
def A__ ( file, check_only=True ):
    """Sort the ``_import_structure`` entries of one ``__init__.py``.

    Returns ``True`` when the file would change and *check_only* is set; otherwise rewrites
    the file in place (returning ``None``).

    Fixes: both parameters were named ``__lowerCamelCase`` (duplicate argument names, a
    SyntaxError); per-block keys were extracted by searching the wrong variable; a sort
    lambda referenced the undefined name ``x``; the f-string referenced ``file`` which was
    never bound; and none of the helpers it calls exist at module scope (every module-level
    name was bound to ``a__``), so self-contained local equivalents are used.
    """
    # `"key":` entries of the dict literal vs. `_import_structure["key"]` statements.
    _re_direct_key = re.compile(r"^\s*\"([^\"]+)\":" )
    _re_indirect_key = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]" )
    _re_bracket_content = re.compile(r"\[([^\]]+)\]" )
    _re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$" )

    def _get_indent(line ):
        found = re.search(r"^(\s*)\S", line )
        return "" if found is None else found.groups()[0]

    def _sort_objects(objects, key=None ):
        # Constants first, then classes, then functions; each sorted ignoring case/underscores.
        if key is None:
            key = lambda obj: obj
        norm_key = lambda obj: key(obj ).lower().replace('_', '' )
        constants = [obj for obj in objects if key(obj ).isupper()]
        classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
        functions = [obj for obj in objects if not key(obj )[0].isupper()]
        return sorted(constants, key=norm_key ) + sorted(classes, key=norm_key ) + sorted(functions, key=norm_key )

    def _split_blocks(code, indent_level="", start_prompt=None, end_prompt=None ):
        # Split code into blocks delimited by lines at exactly `indent_level`.
        index = 0
        lines = code.split('\n' )
        if start_prompt is not None:
            while not lines[index].startswith(start_prompt ):
                index += 1
            blocks = ["\n".join(lines[:index] )]
        else:
            blocks = []
        current_block = [lines[index]]
        index += 1
        while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
            if len(lines[index] ) > 0 and _get_indent(lines[index] ) == indent_level:
                if len(current_block ) > 0 and _get_indent(current_block[-1] ).startswith(indent_level + " " ):
                    current_block.append(lines[index] )
                    blocks.append("\n".join(current_block ) )
                    if index < len(lines ) - 1:
                        current_block = [lines[index + 1]]
                        index += 1
                    else:
                        current_block = []
                else:
                    blocks.append("\n".join(current_block ) )
                    current_block = [lines[index]]
            else:
                current_block.append(lines[index] )
            index += 1
        if len(current_block ) > 0:
            blocks.append("\n".join(current_block ) )
        if end_prompt is not None and index < len(lines ):
            blocks.append("\n".join(lines[index:] ) )
        return blocks

    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F'''[{imports}]'''
        keys = [part.strip().replace('"', '' ) for part in imports.split(',' )]
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'''"{k}"''' for k in _sort_objects(keys )] ) + "]"

    def _sort_statement(import_statement ):
        # Sort the bracketed name list(s) inside one `_import_structure` statement.
        lines = import_statement.split('\n' )
        if len(lines ) > 3:
            idx = 2 if lines[1].strip() == '[' else 1
            keys = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
            sorted_keys = _sort_objects(keys, key=lambda pair: pair[1] )
            sorted_lines = [lines[pair[0] + idx] for pair in sorted_keys]
            return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
        elif len(lines ) == 3:
            if _re_bracket_content.search(lines[1] ) is not None:
                lines[1] = _re_bracket_content.sub(_replace, lines[1] )
            else:
                keys = [part.strip().replace('"', '' ) for part in lines[1].split(',' )]
                if len(keys[-1] ) == 0:
                    keys = keys[:-1]
                lines[1] = _get_indent(lines[1] ) + ", ".join([F'''"{k}"''' for k in _sort_objects(keys )] )
            return "\n".join(lines )
        else:
            return _re_bracket_content.sub(_replace, import_statement )

    with open(file, 'r' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0.
    main_blocks = _split_blocks(code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:' )
    # We ignore block 0 (everything until start_prompt) and the last block (after end_prompt).
    for block_idx in range(1, len(main_blocks ) - 1 ):
        block = main_blocks[block_idx]
        block_lines = block.split('\n' )
        # Get to the start of the imports, skipping dummy-import blocks entirely.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore the last line: it closes the block and contains nothing to sort.
        internal_block_code = "\n".join(block_lines[line_idx:-1] )
        indent = _get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = _split_blocks(internal_block_code, indent_level=indent )
        # Two categories of import key: dict-literal keys or `_import_structure["key"]`.
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys; some lines are empty or comments and yield None.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda pair: pair[1] )]
        # Reorder: keep empty lines/comments where they were, sort the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                reordered_blocks.append(_sort_statement(internal_blocks[sorted_indices[count]] ) )
                count += 1
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F'''Overwriting {file}.''' )
            with open(file, 'w' ) as f:
                f.write("\n".join(main_blocks ) )
def A__ ( check_only=True ):
    """Run the import sorter over every ``__init__.py`` under the diffusers source tree and
    raise if any file would change (or rewrite them when *check_only* is false).

    Fixes: ``os.walk`` was called on the boolean flag instead of the package root; each
    failing path overwrote the previous list instead of accumulating; and the entry point
    below referenced an undefined parser/args and called the function by an undefined name.
    """
    failures = []
    # NOTE(review): the package-root constant at the top of this module was clobbered by later
    # assignments to the same name, so the path is inlined here — restore the named constant.
    for root, _, files in os.walk("src/diffusers" ):
        if "__init__.py" in files:
            # NOTE(review): `sort_imports` must exist at module scope — the per-file sorter in
            # this module was renamed by the obfuscating refactor; verify.
            result = sort_imports(os.path.join(root, '''__init__.py''' ), check_only=check_only )
            if result:
                failures.append(os.path.join(root, '''__init__.py''' ) )
    if len(failures ) > 0:
        raise ValueError(F'''Would overwrite {len(failures )} files, run `make style`.''' )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    A__(check_only=args.check_only)
| 719 |
"""simple docstring"""
import os
from pathlib import Path
def A__ ( ):
    """Compile and load the MultiScaleDeformableAttention CUDA extension for deformable DETR.

    Fixes: the original resolved the kernel directory from an undefined name instead of this
    module's ``__file__`` and passed an undefined name as ``with_cuda`` instead of ``True``.
    """
    from torch.utils.cpp_extension import load

    # kernels/ lives three directories above this module in the transformers source tree.
    root = Path(__file__ ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
    src_files = [
        root / filename
        for filename in [
            '''vision.cpp''',
            os.path.join('''cpu''', '''ms_deform_attn_cpu.cpp''' ),
            os.path.join('''cuda''', '''ms_deform_attn_cuda.cu''' ),
        ]
    ]
    load(
        '''MultiScaleDeformableAttention''', src_files, with_cuda=True, extra_include_paths=[str(root )], extra_cflags=['-DWITH_CUDA=1'], extra_cuda_cflags=[
            '''-DCUDA_HAS_FP16=1''',
            '''-D__CUDA_NO_HALF_OPERATORS__''',
            '''-D__CUDA_NO_HALF_CONVERSIONS__''',
            '''-D__CUDA_NO_HALF2_OPERATORS__''',
        ], )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 309 | 0 |
'''simple docstring'''
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Logistic sigmoid; with ``deriv=True``, its derivative given the *activation* value."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight toward ``expected`` (a percentage) and return the final
    prediction scaled back to 0-100.

    Fixes: the original bound the sigmoid helper, the initial-value constant and this
    function to the same obfuscated name (each assignment shadowing the previous one), so
    ``sigmoid_function``/``INITIAL_VALUE``/``forward_propagation`` were all undefined at call
    time; restore distinct names so the script actually runs.
    """
    # Random start weight in [-1, 1].
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("""Expected value: """))
    number_propagations = int(input("""Number of propagations: """))
    print(forward_propagation(expected, number_propagations))
| 676 |
'''simple docstring'''
def a_ (number: int, iterations: int) -> str:
    """Play FizzBuzz starting at ``number`` up to ``iterations``, returning the
    space-separated transcript.

    Fix: both parameters were named ``__snake_case`` (duplicate argument name, a
    SyntaxError); restore distinct names matching the validation messages.
    """
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            '''starting number must be
 and integer and be more than 0''')
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')
    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 676 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[Any] = logging.get_logger(__name__)
_a : List[Any] = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class a_ ( a ):
    """Configuration for EfficientNet models; the defaults correspond to efficientnet-b7.

    Fixes: every ``__init__`` parameter was named ``UpperCAmelCase__`` (duplicate argument
    names, a SyntaxError) and the class attribute holding the model type was bound to an
    obfuscated name; parameter names are restored from the attribute assignments in the body
    and ``model_type`` from the ``PretrainedConfig`` convention.
    """

    model_type = 'efficientnet'

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat contributes 4 hidden layers (expand, depthwise, squeeze, project).
        self.num_hidden_layers = sum(num_block_repeats) * 4
class a_ ( a ):
    """ONNX export configuration for EfficientNet.

    Fix: the class attribute and both properties carried obfuscated names that the ONNX
    export machinery never looks up; restore the ``OnnxConfig`` API names
    (``torch_onnx_minimum_version``, ``inputs``, ``atol_for_validation``).
    """

    # Minimum torch version required for a correct ONNX export.
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic/static axes of the single ``pixel_values`` input."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 1e-5
| 84 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class a_ :
    """Builds a small ``ASTConfig`` plus dummy spectrogram inputs for the AST model tests.

    Fixes: every ``__init__`` parameter was named ``UpperCAmelCase__`` (duplicate argument
    names, a SyntaxError); all methods were named ``lowerCAmelCase`` (shadowing each other)
    while the bodies and the sibling test class call them by their canonical names; locals
    were assigned to throwaway names and later lines referenced the undefined canonical
    names; and the final ``prepare_config_and_inputs_for_common`` used an annotated tuple
    target, which is itself a SyntaxError.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride, )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {'''input_values''': input_values}
        return config, inputs_dict
@require_torch
class a_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common + pipeline test suite for the AST models.

    Fixes: the six class attributes all shared one obfuscated name (only the last survived);
    all methods were named ``lowerCAmelCase`` (shadowing each other) while the common-test
    mixins discover them by their canonical ``test_*`` names; locals were assigned to
    throwaway names and later lines referenced the undefined canonical names; and the mixin
    bases were reduced to the undefined name ``a``.
    """

    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original four bool flags were unlabelled — these names follow the
    # upstream AST test (no fx tracing, pruning, embedding resize or head masking); confirm.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        # NOTE(review): `ASTModelTester` must exist at module scope — the tester class in this
        # module was renamed by the obfuscating refactor; verify.
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''AST does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''input_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(UpperCAmelCase__ )
def a_ ( ) -> Dict:
"""simple docstring"""
snake_case : Dict = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
snake_case , snake_case : int = torchaudio.load(__magic_name__ )
return audio, sampling_rate
@require_torch
@require_torchaudio
class a_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase( self : Any ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def lowerCAmelCase( self : Tuple ):
"""simple docstring"""
snake_case : List[str] = self.default_feature_extractor
snake_case : str = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' ).to(UpperCAmelCase__ )
snake_case : str = self.default_feature_extractor
snake_case , snake_case : int = prepare_audio()
snake_case : Optional[int] = audio.squeeze().numpy()
snake_case : Optional[Any] = feature_extractor(UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , return_tensors='''pt''' ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
snake_case : Union[str, Any] = model(**UpperCAmelCase__ )
# verify the logits
snake_case : Any = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase__ )
snake_case : str = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase__ , atol=1e-4 ) )
| 84 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Default batch sizes for the example. The obfuscated file bound both values
# to the same name, silently discarding the first one; upstream uses two
# distinct constants.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """
    Build the train/eval dataloaders for GLUE MRPC.

    Args:
        accelerator: the ``Accelerator`` coordinating execution; used to gate
            dataset preprocessing to the main process and to pick padding
            behaviour per device/precision.
        batch_size (int): per-device batch size for the training dataloader.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of
    # the dataset, starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name
    # for labels by the models of the transformers library.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=32,  # EVAL_BATCH_SIZE in the upstream example
    )

    return train_dataloader, eval_dataloader
# For testing only: swap in lightweight mocked dataloaders so CI does not have
# to download the GLUE dataset or the BERT tokenizer.
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """
    Train and evaluate BERT-base on GLUE MRPC with ``accelerate``, using
    gradient accumulation.

    Args:
        config (dict): hyper-parameters; expects keys "lr", "num_epochs",
            "seed" and "batch_size".
        args: parsed CLI arguments (``mixed_precision``, ``cpu``,
            ``gradient_accumulation_steps``).
    """
    # For testing only: shorten the run when mocked dataloaders are in use.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything.
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    """Parse CLI arguments and launch :func:`training_function`."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help='The number of minibatches to be ran before gradients are accumulated.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
# Standard script entry point.
if __name__ == "__main__":
    main()
| 43 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
A : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
A : List[str] = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
A : Any = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... 
lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A(datasets.Metric):
    """ChrF and ChrF++ MT evaluation metrics, backed by sacrebleu's CHRF."""

    # NOTE: `datasets.Metric` dispatches to the overrides `_info` and
    # `_compute` by name; the obfuscated file defined both under a single
    # name, so `_info` was shadowed and `_compute` had six identical
    # parameter names (a SyntaxError). Both are restored here.
    def _info(self):
        """Return metric metadata; raise if the installed sacrebleu is too old."""
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`."
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        """
        Score `predictions` against `references` with chrF(++).

        Each prediction must have the same number of references; sacrebleu
        expects the references transposed (one list per reference index).
        """
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # Transpose: list-of-refs-per-prediction -> list-of-predictions-per-ref-slot.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 15 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
# Model classes for optional backends (torch / tf / flax) are only registered
# when the backend is importable.  (The obfuscated file assigned each list to
# a throwaway name instead of extending this dict, and never bound
# `_import_structure` at all.)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger. (The obfuscated file rebound a single name for both
# objects below, discarding the logger.)
logger = logging.get_logger(__name__)

# Map from model identifier to its hosted configuration file.
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class a_(PretrainedConfig):
    """
    Configuration class for the LXMERT model: stores text/vision/cross-modal
    encoder sizes, the pre-training task switches and the visual-loss weights.
    (The obfuscated file gave every `__init__` parameter the same name — a
    SyntaxError — and read the unbound original names in the body; the upstream
    parameter names are restored here.)
    """

    model_type = 'lxmert'
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # LXMERT has three encoders, so "num_hidden_layers" is a per-modality map.
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
| 252 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCamelCase(ProcessorMixin):
    """
    Processor wrapping a CLIP image processor and an XLM-Roberta tokenizer into
    a single callable. (The obfuscated file lost the `ProcessorMixin` base, the
    distinct class-attribute names, and the assignment of `pixel_values` into
    the returned encoding; all are restored here.)
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the legacy kwarg as a fallback for the image processor.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """
        Tokenize `text` and/or preprocess `images`; when both are given the
        image `pixel_values` are merged into the text encoding.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 243 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _lowerCamelCase(ProcessorMixin):
    """
    Processor wrapping a CLAP feature extractor and a Roberta tokenizer into a
    single callable. (The obfuscated file lost the `ProcessorMixin` base, the
    distinct class-attribute names, and the assignment of `input_features` into
    the returned encoding; all are restored here.)
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """
        Tokenize `text` and/or extract features from `audios`; when both are
        given the audio `input_features` are merged into the text encoding.
        """
        # `sampling_rate` is consumed here so it is not forwarded to the tokenizer.
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and feature-extractor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 243 | 1 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=13 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Any=99 , SCREAMING_SNAKE_CASE__ : Dict=32 , SCREAMING_SNAKE_CASE__ : Optional[int]=5 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : Tuple="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Any=512 , SCREAMING_SNAKE_CASE__ : int=16 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : Tuple=4 , SCREAMING_SNAKE_CASE__ : int=None , ) -> List[Any]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
def a ( self : Any ) -> Union[str, Any]:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a ( self : Optional[int] ) -> List[str]:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> Tuple:
lowerCAmelCase__ = NystromformerModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
lowerCAmelCase__ = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
lowerCAmelCase__ = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> int:
lowerCAmelCase__ = NystromformerForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> int:
lowerCAmelCase__ = NystromformerForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = NystromformerForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = NystromformerForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = NystromformerForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowerCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a ( self : Optional[Any] ) -> Any:
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __lowerCamelCase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model-test suite for Nyströmformer.

    NOTE(review): the obfuscated original named every test method `a` (so each
    definition shadowed the previous one) and referenced the undefined name
    `__UpperCamelCase`; method names and local variables were restored to the
    conventional transformers test layout. The base classes were the undefined
    name `__snake_case` — restored to the standard test mixins.
    """

    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        # Functional tester plus the generic config round-trip tester.
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the prepared tuple.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the public `uw-madison/nystromformer-512` checkpoint.

    NOTE(review): renamed from `__lowerCamelCase`, which collided with — and
    silently shadowed — the common test class of the same name defined above,
    so the common suite was never collected.
    """

    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        # Position 2 corresponds to the [MASK] token in the encoded sentence.
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 702 |
UpperCamelCase = 9.80_665
def _A ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float = g ):
"""simple docstring"""
if fluid_density <= 0:
raise ValueError("Impossible fluid density" )
if volume < 0:
raise ValueError("Impossible Object volume" )
if gravity <= 0:
raise ValueError("Impossible Gravity" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 125 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration file.
logger = logging.get_logger(__name__)

# NOTE(review): the original bound the logger and this map to the same name
# (`UpperCAmelCase__`), so the logger was immediately shadowed by the dict;
# restored to distinct conventional names.
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class lowerCAmelCase_(PretrainedConfig):
    """Configuration for GLPN (monocular depth estimation) models.

    NOTE(review): the original `__init__` declared eighteen parameters that all
    shared one name (a SyntaxError) and inherited from the undefined name `a__`;
    parameter names were restored from the assignment order in the body, and the
    base class from the `PretrainedConfig` import at the top of the file.
    """

    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],  # mutable defaults kept as-is: they are read-only templates here
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 1_60, 2_56],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1E-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 223 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# All paths are relative to the repo root; later code dereferences these exact
# names (PATH_TO_TRANSFORMERS, transformers, CONFIG_MAPPING, SPECIAL_CASES_TO_ALLOW).
# NOTE(review): the original bound every one of these to the single name
# `UpperCAmelCase__`, so each assignment shadowed the previous one and the
# references below were undefined; names restored.
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Map of config-class name -> either True (skip entirely) or a list of attribute
# names that are legitimately "unused" in the modeling files.
SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    'EncodecConfig': ['overlap'],
    # used as `self.bert_model = BertModel(config, ...)`
    'DPRConfig': True,
    # not used in modeling files, but it's an important information
    'FSMTConfig': ['langs'],
    # used internally in the configuration class file
    'GPTNeoConfig': ['attention_types'],
    # used internally in the configuration class file
    'EsmConfig': ['is_folding_model'],
    # used during training (despite we don't have training script for these models yet)
    'Mask2FormerConfig': ['ignore_value'],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    'OneFormerConfig': ['ignore_value', 'norm'],
    # used during preprocessing and collation, see `collating_graphormer.py`
    'GraphormerConfig': ['spatial_pos_max'],
    # used internally in the configuration class file
    'T5Config': ['feed_forward_proj'],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
    'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
    # used internally in the configuration class file
    'LongT5Config': ['feed_forward_proj'],
    # used internally in the configuration class file
    'SwitchTransformersConfig': ['feed_forward_proj'],
    # having default values other than `1e-5` - we can't fix them without breaking
    'BioGptConfig': ['layer_norm_eps'],
    # having default values other than `1e-5` - we can't fix them without breaking
    'GLPNConfig': ['layer_norm_eps'],
    # having default values other than `1e-5` - we can't fix them without breaking
    'SegformerConfig': ['layer_norm_eps'],
    # having default values other than `1e-5` - we can't fix them without breaking
    'CvtConfig': ['layer_norm_eps'],
    # having default values other than `1e-5` - we can't fix them without breaking
    'PerceiverConfig': ['layer_norm_eps'],
    # used internally to calculate the feature size
    'InformerConfig': ['num_static_real_features', 'num_time_features'],
    # used internally to calculate the feature size
    'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
    # used internally to calculate the feature size
    'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
    # used internally to calculate `mlp_dim`
    'SamVisionConfig': ['mlp_ratio'],
    # For (head) training, but so far not implemented
    'ClapAudioConfig': ['num_classes'],
    # Not used, but providing useful information to users
    'SpeechT5HifiGanConfig': ['sampling_rate'],
}


# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        'CLIPSegConfig': True,
        'DeformableDetrConfig': True,
        'DetaConfig': True,
        'DinatConfig': True,
        'DonutSwinConfig': True,
        'EfficientFormerConfig': True,
        'FSMTConfig': True,
        'JukeboxConfig': True,
        'LayoutLMv2Config': True,
        'MaskFormerSwinConfig': True,
        'MT5Config': True,
        'NatConfig': True,
        'OneFormerConfig': True,
        'PerceiverConfig': True,
        'RagConfig': True,
        'SpeechT5Config': True,
        'SwinConfig': True,
        'Swin2SRConfig': True,
        'Swinv2Config': True,
        'SwitchTransformersConfig': True,
        'TableTransformerConfig': True,
        'TapasConfig': True,
        'TransfoXLConfig': True,
        'UniSpeechConfig': True,
        'UniSpeechSatConfig': True,
        'WavLMConfig': True,
        'WhisperConfig': True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        'JukeboxPriorConfig': True,
        # TODO: @Younes (for `is_decoder`)
        'Pix2StructTextConfig': True,
    }
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Return True if any variant name in `attributes` is referenced by the modeling
    sources (or is explicitly allowed).

    NOTE(review): the original declared four parameters all named `_snake_case`
    (a SyntaxError); names restored from the call site in
    `check_config_attributes_being_used` and from the body's own references.

    Args:
        config_class: the configuration class the attribute belongs to.
        attributes: all variant names for one `__init__` parameter.
        default_value: the parameter's default in the config class signature.
        source_strings: contents of the sibling `modeling_*` files.
    """
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f'''config.{attribute}''' in modeling_source
                or f'''getattr(config, "{attribute}"''' in modeling_source
                or f'''getattr(self.config, "{attribute}"''' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return a sorted list of `config_class.__init__` arguments that appear to be
    unused by the corresponding `modeling_*` files.

    NOTE(review): function and parameter names restored — the original was named
    `lowercase_` (shadowing its siblings) while the call site in
    `check_config_attributes` already used this name.
    """
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    """Run the unused-attribute check over every config class reachable from
    `CONFIG_MAPPING` and raise ValueError summarising any offenders.

    NOTE(review): the original lambda tested `issubclass(_snake_case, _snake_case)`
    (a class against itself — always True); restored to the intended
    `issubclass(x, PretrainedConfig)` check.
    """
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f'''{name}: {attributes}\n'''
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 223 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

# NOTE(review): the original bound the logger and all three constant dicts to the
# single name `lowerCAmelCase_`, so each shadowed the previous one while the
# tokenizer class below references VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES; names restored.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1_0_2_4,
}
class snake_case_(PreTrainedTokenizerFast):
    """Fast (Rust-backed) MVP tokenizer, mirroring the BART/RoBERTa BPE layout.

    NOTE(review): restored from obfuscated code — `__init__` declared thirteen
    parameters all named `UpperCamelCase` (a SyntaxError), every method was
    named `__UpperCAmelCase` (so `@mask_token.setter` referenced a non-existent
    property), and the base class was the undefined name `A__`. Names follow the
    canonical transformers fast-tokenizer layout.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Re-configure the backend pre-tokenizer if the caller's `add_prefix_space`
        # disagrees with what the serialized tokenizer.json carries.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        """The mask token as a plain string, or None (with a logged error) when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # lstrip=True so "<mask>" eats the preceding space, matching BART-style tokenizers.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Serialize the backend model files into `save_directory`; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """<s> A </s> for one sequence; <s> A </s></s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None):
        """MVP (like BART) does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]
| 426 |
'''simple docstring'''
def lowerCAmelCase(bin_string: str) -> str:
    """Convert a string of binary digits to its octal representation.

    NOTE(review): the obfuscated original took a parameter `a__` but its body
    referenced the undefined name `bin_string`, and the padding loop assigned to
    a throwaway name while testing `len(a__)` — an infinite loop; restored.

    Raises:
        ValueError: if the string contains characters outside {'0', '1'} or is empty.
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    # Left-pad with zeros so the length is a multiple of three.
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    # Each group of three binary digits maps to exactly one octal digit.
    oct_string = ""
    for index in range(0, len(bin_string), 3):
        oct_string += str(int(bin_string[index : index + 3], 2))
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 426 | 1 |
from math import factorial
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> float:
if successes > trials:
raise ValueError('''successes must be lower or equal to trials''' )
if trials < 0 or successes < 0:
raise ValueError('''the function is defined for non-negative integers''' )
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
raise ValueError('''the function is defined for non-negative integers''' )
if not 0 < prob < 1:
raise ValueError('''prob has to be in range of 1 - 0''' )
SCREAMING_SNAKE_CASE__ = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
SCREAMING_SNAKE_CASE__ = float(factorial(lowerCAmelCase_ ) )
coefficient /= factorial(lowerCAmelCase_ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
| 100 |
from torch import nn
def __snake_case ( lowerCAmelCase_ ) -> Tuple:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
| 100 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __lowercase ( enum.Enum ):
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
@add_end_docstrings(a__ )
class __lowercase ( a__ ):
_lowerCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Tuple , *lowercase__ : Tuple , **lowercase__ : Any ):
super().__init__(*lowercase__ , **lowercase__ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
a_ = None
if self.model.config.prefix is not None:
a_ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
a_ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
a_ , a_ , a_ = self._sanitize_parameters(prefix=lowercase__ , **self._forward_params )
a_ = {**self._preprocess_params, **preprocess_params}
a_ = {**self._forward_params, **forward_params}
def __magic_name__ ( self : Any , lowercase__ : Tuple=None , lowercase__ : List[str]=None , lowercase__ : Optional[Any]=None , lowercase__ : Union[str, Any]=None , lowercase__ : Union[str, Any]=None , lowercase__ : Any=None , lowercase__ : Optional[Any]=None , lowercase__ : Dict=None , **lowercase__ : Optional[Any] , ):
a_ = {}
if prefix is not None:
a_ = prefix
if prefix:
a_ = self.tokenizer(
lowercase__ , padding=lowercase__ , add_special_tokens=lowercase__ , return_tensors=self.framework )
a_ = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
''' [None, \'hole\']''' )
a_ = handle_long_generation
preprocess_params.update(lowercase__ )
a_ = generate_kwargs
a_ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
a_ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
a_ = ReturnType.TENSORS
if return_type is not None:
a_ = return_type
if clean_up_tokenization_spaces is not None:
a_ = clean_up_tokenization_spaces
if stop_sequence is not None:
a_ = self.tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
if len(lowercase__ ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
a_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __magic_name__ ( self : int , *lowercase__ : int , **lowercase__ : Union[str, Any] ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*lowercase__ , **lowercase__ )
def __call__( self : Union[str, Any] , lowercase__ : List[Any] , **lowercase__ : str ):
return super().__call__(lowercase__ , **lowercase__ )
def __magic_name__ ( self : Any , lowercase__ : List[Any] , lowercase__ : int="" , lowercase__ : Union[str, Any]=None , **lowercase__ : int ):
a_ = self.tokenizer(
prefix + prompt_text , padding=lowercase__ , add_special_tokens=lowercase__ , return_tensors=self.framework )
a_ = prompt_text
if handle_long_generation == "hole":
a_ = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
a_ = generate_kwargs['''max_new_tokens''']
else:
a_ = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
a_ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
a_ = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
a_ = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def __magic_name__ ( self : Union[str, Any] , lowercase__ : Dict , **lowercase__ : Tuple ):
a_ = model_inputs['''input_ids''']
a_ = model_inputs.get('''attention_mask''' , lowercase__ )
# Allow empty prompts
if input_ids.shape[1] == 0:
a_ = None
a_ = None
a_ = 1
else:
a_ = input_ids.shape[0]
a_ = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
a_ = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
a_ = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
a_ = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
a_ = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
a_ = self.model.generate(input_ids=lowercase__ , attention_mask=lowercase__ , **lowercase__ )
a_ = generated_sequence.shape[0]
if self.framework == "pt":
a_ = generated_sequence.reshape(lowercase__ , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
a_ = tf.reshape(lowercase__ , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __magic_name__ ( self : List[str] , lowercase__ : Optional[Any] , lowercase__ : Dict=ReturnType.FULL_TEXT , lowercase__ : Tuple=True ):
a_ = model_outputs['''generated_sequence'''][0]
a_ = model_outputs['''input_ids''']
a_ = model_outputs['''prompt_text''']
a_ = generated_sequence.numpy().tolist()
a_ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
a_ = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
a_ = self.tokenizer.decode(
lowercase__ , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
a_ = 0
else:
a_ = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ , ) )
if return_type == ReturnType.FULL_TEXT:
a_ = prompt_text + text[prompt_length:]
else:
a_ = text[prompt_length:]
a_ = {'''generated_text''': all_text}
records.append(lowercase__ )
return records
| 143 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Fix: every registration below previously assigned to a throwaway name and the
# `_LazyModule` result was discarded; the standard transformers lazy-module
# pattern is restored (`_import_structure` dict + `sys.modules` replacement).
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 143 | 1 |
"""simple docstring"""
from __future__ import annotations
# Movement deltas; indices into this list are stored in the action map.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(grid, init, goal, cost, heuristic):
    """A* grid search.

    Fix: the constant and all five parameters had been obfuscated (five
    identical parameter names — a SyntaxError — and assignments to a throwaway
    name), while the body and the caller read `DIRECTIONS`, `grid`, `init`,
    `goal`, `cost`, `heuristic`.

    :param grid: 2D list, 0 = free cell, 1 = obstacle
    :param init: [row, col] start cell
    :param goal: [row, col] goal cell
    :param cost: uniform step cost
    :param heuristic: 2D list of per-cell heuristic values
    :return: (path, action) — path is the list of cells from init to goal,
        action is the grid of direction indices used for backtracking
    :raises ValueError: if no path to the goal exists
    """
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    # Demo: every assignment below had been obfuscated into one throwaway name
    # while the reads used `grid`/`init`/`goal`/`cost`/`heuristic`/`path`/`action`
    # (NameError); the intended names are restored.
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    """Task template describing a text-classification dataset layout.

    Fix: the decorator argument and base class referenced an undefined name,
    all five dataclass fields shared one identifier, and the alignment method
    returned an undefined variable; the canonical `datasets` task template is
    restored.
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel."""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the canonical 'text'/'labels' names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 362 | 0 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    """Type inference / casting behavior of TypedSequence through pa.array.

    Fix: the base class and the exception names in assertRaises referenced
    undefined identifiers, and digit-mangled attributes (`pa.intaa`,
    `np.uinta`) do not exist; restored against the upstream datasets tests.
    NOTE(review): `ArrayaD`/`ArrayaDExtensionType` match this file's import
    line but the upstream names are `Array2D`/`Array2DExtensionType` — confirm.
    """

    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        # try_type falls back to inference instead of raising.
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=ArrayaD((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks):
    """Read back a finalized Arrow stream (buffer or file path) and sanity-check it.

    Fix: both parameters shared one obfuscated name (a SyntaxError); the
    function name is grounded by its call sites in this file.
    """
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
# The nine writer tests below all had their locals collapsed into `a__` and
# multi-parameter signatures collapsed into duplicate names (SyntaxError);
# the names used on the read side (`output`, `writer`, `num_examples`, …) and
# the imports (`InvalidKeyError`, `DuplicatedKeysError`) ground the restoration.


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    """Row-wise writes produce the expected schema and payload."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    """An explicit Features object drives the writer schema (incl. metadata)."""
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    """Invalid (non-hashable) key types are rejected when check_duplicates is on."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    """Writing the same key twice raises DuplicatedKeysError."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    """Distinct keys pass duplicate checking and the data round-trips."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    """Batched writes (including an empty batch) behave like row-wise writes."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    """Writing a whole pyarrow Table behaves like row-wise writes."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    """Writing one-row Tables behaves like row-wise writes."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    """Writing to a file path (rather than a stream) also round-trips."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    """Strip list nesting from a pyarrow type and return the innermost dtype.

    Fix: the body read `arr_type` and the recursive call used the real name
    while the parameter had been obfuscated.
    """
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    """Replace the first primitive (non-list) leaf of a nested list in place.

    Fix: both parameters shared one obfuscated name (a SyntaxError) and the
    `isinstance` check lost its second argument, which must be `list` for the
    recursion to terminate at a primitive leaf.
    """
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    """optimized_int_type overrides the default int64 inference at any nesting depth."""
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    """Well-known tokenizer columns get downcast dtypes, falling back to int64 when out of range."""
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    """The underlying stream is closed on both clean exit and error exit."""
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    """ArrowWriter resolves fsspec storage options and writes through the mock filesystem."""
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    """ParquetWriter output is readable back with pyarrow.parquet."""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    """Image bytes are embedded (or only referenced) according to embed_local_files."""
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    """ArrowWriter normalizes inferred schemas so every field is nullable."""
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 148 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """A single node of a circular singly-linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class CircularLinkedList:
    """Circular singly-linked list with head/tail pointers.

    Fix: both classes shared one obfuscated name (the second shadowed the
    first) and every attribute assignment had been collapsed into `a__`,
    leaving the nodes unlinked; targets restored from the preserved
    right-hand sides.
    """

    def __init__(self) -> None:
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        """Yield each element once, starting at the head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append `data` after the current tail."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend `data` before the current head."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert `data` at position `index` (0 <= index <= len)."""
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        """Remove and return the head element."""
        return self.delete_nth(0)

    def delete_tail(self):
        """Remove and return the tail element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        """Remove and return the element at position `index`."""
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Smoke-test every public operation of CircularLinkedList.

    Fix: every assignment had been obfuscated while the asserts read
    `circular_linked_list`; the local name is restored.
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 148 | 1 |
# Digit-position constants shared by next_term/compute below. Fix: all three
# had been collapsed into one shadowed name while the function bodies read
# `ks`, `base` and `memo`.
ks = range(2, 20 + 1)  # digit-block sizes considered by the jump search
base = [10**k for k in range(ks[-1] + 1)]  # powers of ten up to 10**20
memo: dict[int, dict[int, list[list[int]]]] = {}  # cached jumps keyed by digitsum(b) then c
def next_term(a_i, k, i, n):
    """Jump ahead in the digit-sum sequence using cached jumps (Project Euler 551).

    `a_i` is the current term as a little-endian digit list split as
    b * 10**k + c; returns (diff, terms_jumped).

    Fix: all four parameters shared one obfuscated name (a SyntaxError) and
    local assignments were collapsed into `a_` while reads used the real names.
    """
    # ds_b -> digitsum(b), digit sum of the high part
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    # c -> value of the low k digits
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)
def compute(a_i, k, i, n):
    """Advance the digit-sum sequence one term at a time until the low k digits carry.

    Returns (diff, terms_computed). Fix: duplicate parameter names
    (SyntaxError) and `a_`-clobbered locals restored from the read side.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b  # digit sum of the current term
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            # carry spilled past the low k digits; stop and flush it below
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i
def add(digits, k, addend):
    """Add `addend` into the little-endian digit list `digits`, starting at index k.

    Mutates `digits` in place. Fix: three parameters shared one obfuscated
    name (a SyntaxError); targets restored from the preserved right-hand sides.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break

    # append any remaining carry as new high digits
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)
def solution(n=10**15):
    """Return a(n) of the sequence a(k+1) = a(k) + digitsum(a(k)), a(1) = 1.

    Project Euler 551. Fix: the tuple-unpacking assignment had been collapsed
    into `a_` while the loop read `diff`/`terms_jumped`; names restored.
    """
    digits = [1]  # little-endian digits of the current term
    i = 1
    dn = 0  # terms jumped so far
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
| 540 |
def _UpperCAmelCase ( a__ , a__):
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(a__ , x % y)
def _UpperCAmelCase ( a__ , a__):
'''simple docstring'''
return (x * y) // greatest_common_divisor(a__ , a__)
def _UpperCAmelCase ( a__ = 2_0):
'''simple docstring'''
a_ : Any = 1
for i in range(1 , n + 1):
a_ : Tuple = lcm(a__ , a__)
return g
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module (the function
    # above was renamed `_UpperCAmelCase` by obfuscation), so this guard
    # raises NameError when run as a script.
    print(F"""{solution() = }""")
| 540 | 1 |
'''simple docstring'''
def snake_case_(string: str, separator: str = " "):
    """Split ``string`` on ``separator`` without using ``str.split``.

    Returns the list of chunks between separators.  Unlike ``str.split``,
    a trailing separator does not yield a trailing empty chunk.

    The obfuscated original declared both parameters as ``UpperCamelCase``
    (a SyntaxError) and collapsed every local name; restored here.
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            # final character of the input: flush the last chunk
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 719 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class A ( ctypes.Structure ):
    """ctypes mirror of the Win32 CONSOLE_CURSOR_INFO structure."""

    # _fields_ is the specific attribute name ctypes.Structure requires to
    # register the C layout; the obfuscated original had renamed it
    # `lowercase_`, which ctypes silently ignores (no fields registered).
    # NOTE(review): `ctypes` is imported only under the `os.name == "nt"`
    # guard above, so this top-level definition still raises NameError on
    # non-Windows platforms -- it presumably belongs under that guard.
    _fields_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def snake_case_ ():
    """Hide the terminal cursor (Win32 console API on Windows, ANSI escape
    on POSIX).

    Restoration notes: the Win32 DLL namespace had been mangled from
    ``kernel32`` to ``kernelaa`` (no such attribute) and the
    ``visible = False`` assignment was collapsed by obfuscation.
    """
    if os.name == "nt":
        # NOTE(review): upstream this struct is `CursorInfo`; in this module
        # the class above was renamed `A` by obfuscation.
        ci = A()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l')  # ANSI: hide cursor
        sys.stdout.flush()
def snake_case_ ():
    """Show the terminal cursor (Win32 console API on Windows, ANSI escape
    on POSIX).

    Restoration notes: same fixes as the hide helper above -- ``kernelaa``
    restored to ``kernel32`` and the collapsed ``visible = True``
    assignment reinstated.
    """
    if os.name == "nt":
        # NOTE(review): upstream this struct is `CursorInfo`; in this module
        # the class above was renamed `A` by obfuscation.
        ci = A()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h')  # ANSI: show cursor
        sys.stdout.flush()
@contextmanager
def snake_case_ ():
    """Context manager that hides the cursor for the duration of the block
    and always restores it on exit.

    NOTE(review): `hide_cursor` and `show_cursor` are not defined in this
    module -- the two helpers above were both renamed `snake_case_` by
    obfuscation (and are themselves shadowed by this definition) -- so
    entering this context raises NameError; restore the upstream helper
    names before use.
    """
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 377 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Any = logging.get_logger(__name__)
snake_case : List[Any] = {
'''nvidia/segformer-b0-finetuned-ade-512-512''': (
'''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'''
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class snake_case_ (PretrainedConfig ):
    """Configuration for a SegFormer-style model.

    Stores the encoder/decoder hyper-parameters on ``self`` so the base
    config class can serialize them.  Restoration notes: the obfuscated
    original inherited from an undefined name (``PretrainedConfig`` is
    imported above and unused), declared every ``__init__`` parameter as
    ``__snake_case`` (a SyntaxError) and assigned every argument to the
    same throwaway local, losing all of them; the ``self.<name> = <name>``
    pattern is restored from the parameter list.
    """

    # NOTE(review): upstream this class attribute is `model_type`, which
    # PretrainedConfig consumes; restored from that convention.
    model_type = '''segformer'''

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 1_60, 2_56],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1E-6,
        decoder_hidden_size=2_56,
        semantic_loss_ignore_index=2_55,
        **kwargs,
    ):
        # Mutable list defaults are kept for upstream/config-serialization
        # compatibility; they are stored, never mutated.
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.',
                FutureWarning,  # NOTE(review): category was erased by obfuscation; FutureWarning per upstream
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # NOTE(review): the fallback default was erased by obfuscation;
        # True per upstream behaviour.
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class snake_case_ (OnnxConfig ):
    """ONNX export configuration for the SegFormer model above.

    Restoration notes: the obfuscated original inherited from an undefined
    name (``OnnxConfig`` is imported above and unused) and its three
    properties all shared one mangled name, so only the last survived;
    the member names are restored from the OnnxConfig API
    (``torch_onnx_minimum_version`` / ``inputs`` / ``atol_for_validation``
    / ``default_onnx_opset``).
    """

    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single NCHW image input with fully dynamic axes.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1E-4

    @property
    def default_onnx_opset(self) -> int:
        # Minimum ONNX opset supporting the required operators.
        return 12
| 335 |
import argparse
import datetime
def __lowercase ( __lowerCAmelCase : str ):
a__ = {
'0': 'Sunday',
'1': 'Monday',
'2': 'Tuesday',
'3': 'Wednesday',
'4': 'Thursday',
'5': 'Friday',
'6': 'Saturday',
}
a__ = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(__lowerCAmelCase ) < 1_1:
raise ValueError('Must be 10 characters long' )
# Get month
a__ = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 1_3:
raise ValueError('Month must be between 1 - 12' )
a__ = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get day
a__ = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 3_2:
raise ValueError('Date must be between 1 - 31' )
# Get second separator
a__ = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get year
a__ = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 4_5 < y < 8_5_0_0:
raise ValueError(
'Year out of range. There has to be some sort of limit...right?' )
# Get datetime obj for validation
a__ = datetime.date(int(__lowerCAmelCase ) , int(__lowerCAmelCase ) , int(__lowerCAmelCase ) )
# Start math
if m <= 2:
a__ = y - 1
a__ = m + 1_2
# maths var
a__ = int(str(__lowerCAmelCase )[:2] )
a__ = int(str(__lowerCAmelCase )[2:] )
a__ = int(2.6 * m - 5.39 )
a__ = int(c / 4 )
a__ = int(k / 4 )
a__ = int(d + k )
a__ = int(t + u + v + x )
a__ = int(z - (2 * c) )
a__ = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
# Response
a__ = F'Your date {date_input}, is a {days[str(__lowerCAmelCase )]}!'
return response
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Small CLI around the Zeller routine above.
    snake_case : Any = argparse.ArgumentParser(
        description=(
            '''Find out what day of the week nearly any date is or was. Enter '''
            '''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
        )
    )
    parser.add_argument(
        '''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
    )
    # NOTE(review): obfuscation renamed the parser/args variables to
    # `snake_case` while the references below still read `parser`, `args`
    # and `zeller` -- running this as a script raises NameError.
    snake_case : str = parser.parse_args()
    zeller(args.date_input)
| 335 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowerCamelCase ( unittest.TestCase ):
    """Helper that builds image-processor kwargs and expected output sizes
    for the YOLOS image-processing tests below.

    Restoration notes: the obfuscated original declared every ``__init__``
    parameter with the same name (a SyntaxError), dropped every
    ``self.<attr>`` assignment, and gave both helper methods one shared
    mangled name (so one shadowed the other).  Attribute names are restored
    from their uses in the methods; method names are restored from the
    call sites in the test class below
    (``prepare_image_processor_dict`` / ``get_expected_values``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # Default to DETR-style shortest/longest-edge resizing.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should output for
        ``image_inputs`` (PIL images or channel-first arrays/tensors).

        For a batch, the per-image expectations are reduced to the max
        height and max width (the padded batch shape).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            # Resize the shorter edge to `shortest_edge`, keeping aspect ratio.
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """Test suite for the YOLOS image processor.

    Restoration notes (the file was identifier-obfuscated): the mixin base
    class (``ImageProcessingSavingTestMixin`` is imported above and was
    unused), the ``image_processing_class`` attribute, the collapsed local
    variables, and the unittest method names (every method had been
    renamed to the same identifier, so only the last one survived) are
    restored from the surviving ``self.*`` references and the upstream
    test layout.

    NOTE(review): the helper tester class above shares this class's
    obfuscated name ``lowerCamelCase``, so ``setUp`` cannot reach it until
    one of the two classes is renamed.
    """

    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): upstream this is YolosImageProcessingTester; here the
        # helper class is (also) named `lowerCamelCase` and is shadowed by
        # this class at module level -- rename one of them before running.
        self.image_processor_tester = lowerCamelCase(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        # NOTE(review): the expected do_pad values were erased by
        # obfuscation; True (default) / False (overridden) per upstream.
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1E-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39_769, "annotations": target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1E-4))
        # verify area
        expected_area = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 716 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def A_(vl, wt, w, n):
    """Fractional knapsack: maximum value for capacity ``w``.

    ``vl``/``wt`` are the item values and weights, ``n`` the item count.
    Items are taken greedily by value density; the first item that does
    not fully fit is taken fractionally.

    Restoration notes: the obfuscated original declared all four
    parameters as ``_lowercase`` (a SyntaxError) and collapsed the
    locals; restored from the upstream solution.

    NOTE(review): per upstream behaviour, a capacity smaller than the
    first (densest) item's whole weight returns 0 rather than a fraction
    of that item -- confirm this is intended before relying on it.

    >>> A_([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # sort items by value density (value/weight), best first
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    # k = number of whole items that fit within capacity w
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
    # Run the fractional-knapsack doctest above.
    import doctest

    doctest.testmod()
| 310 | 0 |
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
lowerCAmelCase_ = {'UserAgent': UserAgent().random}
def __UpperCAmelCase ( __lowerCamelCase ) -> dict:
lowercase__ : str = script.contents[0]
lowercase__ : Dict = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __A :
    """Scrapes public profile metadata for an Instagram user.

    Restoration notes: the obfuscated original dropped the ``self.url`` /
    ``self.user_data`` assignments in ``__init__`` and renamed every
    property to one shared mangled name (so only the last survived).  The
    property names are restored from their uses in ``__repr__`` /
    ``__str__`` and in the smoke-test function below.
    """

    def __init__( self , username ):
        self.url = f"""https://www.instagram.com/{username}/"""
        self.user_data = self.get_json()

    def get_json( self ) -> dict:
        """Fetch the profile page and return the parsed user mapping."""
        # NOTE(review): the request headers constant above was renamed
        # `lowerCAmelCase_` by obfuscation; it holds the UserAgent header.
        html = requests.get(self.url, headers=lowerCAmelCase_).text
        scripts = BeautifulSoup(html, '''html.parser''').find_all('''script''')
        try:
            # NOTE(review): `extract_user_profile` is the upstream name of
            # the parser above, which was renamed `__UpperCAmelCase` (and is
            # later shadowed by the test function) -- restore it upstream.
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__( self ) -> str:
        return f"""{self.__class__.__name__}('{self.username}')"""

    def __str__( self ) -> str:
        return f"""{self.fullname} ({self.username}) is {self.biography}"""

    @property
    def username( self ) -> str:
        return self.user_data["username"]

    @property
    def fullname( self ) -> str:
        return self.user_data["full_name"]

    @property
    def biography( self ) -> str:
        return self.user_data["biography"]

    @property
    def email( self ) -> str:
        return self.user_data["business_email"]

    @property
    def website( self ) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers( self ) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings( self ) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts( self ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url( self ) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified( self ) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private( self ) -> bool:
        return self.user_data["is_private"]
def __UpperCAmelCase ( __lowerCamelCase = "github" ) -> None:
    """Smoke-test scraping of a known public profile (skipped on CI).

    Restoration notes: `InstagramUser` is not defined in this module --
    the class above was renamed `__A` by obfuscation -- and the
    ``isinstance`` expected type had been erased (``dict`` per the scraped
    payload); both are restored here.
    """
    import os

    if os.environ.get('''CI'''):
        return  # test failing on GitHub Actions
    instagram_user = __A(__lowerCamelCase)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == __lowerCamelCase
    if __lowerCamelCase != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 1_50
    assert instagram_user.number_of_followers > 12_00_00
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith('''https://instagram.''')
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `InstagramUser` is not defined in this module (the class
    # above was renamed `__A` by obfuscation), and the result is bound to
    # `lowerCAmelCase_` while the prints read `instagram_user` -- this demo
    # raises NameError when executed.
    lowerCAmelCase_ = InstagramUser('github')
    print(instagram_user)
    print(F'''{instagram_user.number_of_posts = }''')
    print(F'''{instagram_user.number_of_followers = }''')
    print(F'''{instagram_user.number_of_followings = }''')
    print(F'''{instagram_user.email = }''')
    print(F'''{instagram_user.website = }''')
    print(F'''{instagram_user.profile_picture_url = }''')
    print(F'''{instagram_user.is_verified = }''')
    print(F'''{instagram_user.is_private = }''')
| 560 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> tuple[float | int, list[tuple[int, int]]]:
lowercase__ , lowercase__ : Optional[Any] = grid.shape
lowercase__ : List[str] = [-1, 1, 0, 0]
lowercase__ : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase__ , lowercase__ : List[str] = [(0, source)], set()
lowercase__ : List[str] = np.full((rows, cols) , np.inf )
lowercase__ : Optional[int] = 0
lowercase__ : str = np.empty((rows, cols) , dtype=__lowerCamelCase )
lowercase__ : Optional[int] = None
while queue:
((lowercase__) , (lowercase__)) : Tuple = heappop(__lowerCamelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase__ : Union[str, Any] = []
while (x, y) != source:
path.append((x, y) )
lowercase__ , lowercase__ : Union[str, Any] = predecessors[x, y]
path.append(__lowerCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__lowerCamelCase ) ):
lowercase__ , lowercase__ : Any = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase__ : Tuple = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__lowerCamelCase , (dist + 1, (nx, ny)) )
lowercase__ : Optional[Any] = dist + 1
lowercase__ : Optional[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 560 | 1 |
'''simple docstring'''
from __future__ import annotations
def a(nums1, nums2):
    """Median of the combined (unsorted) number lists ``nums1`` and ``nums2``.

    The lists are concatenated and sorted; the middle element is returned
    for odd total length, the mean of the two middle elements otherwise.

    Restoration notes: the obfuscated original declared both parameters as
    ``__a`` (a SyntaxError) and collapsed the ``divmod`` targets; restored.
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the function above was renamed `a` and both input
    # bindings were collapsed to `__snake_case`, so `median_of_two_arrays`
    # and `array_a` below are unresolved -- NameError when run as a script.
    __snake_case = [float(x) for x in input('''Enter the elements of first array: ''').split()]
    __snake_case = [float(x) for x in input('''Enter the elements of second array: ''').split()]
    print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 721 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase ( SchedulerMixin , ConfigMixin ):
    """Variance-preserving SDE scheduler (score-based generative models,
    Song et al.).

    Restoration notes: the obfuscated original inherited from two
    undefined names (``SchedulerMixin`` / ``ConfigMixin`` are imported
    above and were unused), declared every ``__init__`` parameter with one
    duplicate name (a SyntaxError), dropped the runtime-state
    assignments, and gave both instance methods a single shared mangled
    name.  Parameter names are restored from the ``self.config.*`` reads
    in ``step_pred`` / ``__len__``; the method names ``set_timesteps`` /
    ``step_pred`` are restored per the error message below and the
    upstream scheduler API.
    """

    # NOTE(review): upstream this is the scheduler's solver `order`
    # attribute; the obfuscated original had renamed it `_a`.
    order = 1

    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ):
        # register_to_config stores the arguments on self.config;
        # only mutable runtime state is initialized here.
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps , device = None ):
        """Create the continuous timestep grid from 1 down to sampling_eps."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred( self , score , x , t , generator=None ):
        """One Euler-Maruyama predictor step of the reverse VP-SDE.

        Returns ``(x, x_mean)``: the noisy next sample and its mean.
        """
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute the reverse-time drift and diffusion terms
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__( self ):
        return self.config.num_train_timesteps
return self.config.num_train_timesteps | 280 | 0 |
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase ( PreTrainedModel ):
    """CLIP-based safety checker: flags NSFW and watermarked images and
    blacks them out.

    Restoration notes: the obfuscated original inherited from an undefined
    name (``PreTrainedModel`` is imported above and was unused), gave both
    class attributes one shared mangled name (so the config class was
    lost), dropped the ``self.*`` head assignments in ``__init__``, and
    collapsed the forward-pass locals; all restored from the surviving
    references (``self.vision_model`` / ``self.p_head`` / ``self.w_head``).
    """

    # NOTE(review): attribute names restored per the PreTrainedModel API.
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__( self , config ):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        # one-logit heads over the projected image embedding
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def __A( self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        """Score ``clip_input``, black out flagged ``images`` in place, and
        return ``(images, nsfw_detected, watermark_detected)``."""
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                """Potential NSFW content was detected in one or more images. A black image will be returned instead."""
                """ Try again with a different prompt and/or seed.""" )
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                """Potential watermarked content was detected in one or more images. A black image will be returned instead."""
                """ Try again with a different prompt and/or seed.""" )
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 457 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
UpperCAmelCase : Union[str, Any] = None
# NOTE(review): decompilation collapsed all of these module constants onto the
# single name ``UpperCAmelCase`` (each assignment clobbers the previous one).
# They presumably correspond to ``logger``, ``SPIECE_UNDERLINE``,
# ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP`` and
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``, which the tokenizer class below
# references but which are unresolved in this file as-is — confirm against the
# upstream module before relying on them.
UpperCAmelCase : Any = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = "▁"
UpperCAmelCase : str = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : Dict = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}
UpperCAmelCase : List[Any] = {
    "google/pegasus-xsum": 5_12,
}
class __lowercase ( a_ ):
    """Fast (Rust-backed) PEGASUS tokenizer.

    NOTE(review): the class attributes below reference module constants whose
    binding names were destroyed by decompilation (see the ``UpperCAmelCase``
    assignments above); they are unresolved in this file as-is. The four
    methods were all decompiled to the single name ``__A`` (shadowing each
    other) with duplicated parameter names (a SyntaxError); the conventional
    tokenizer API names are restored from their internal call sites
    (``self._special_token_mask``) and base-class contract.
    """

    UpperCamelCase : str = VOCAB_FILES_NAMES
    UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase : Any = PegasusTokenizer
    UpperCamelCase : Optional[Any] = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=1_03,
        **kwargs,
    ):
        # Read below while building the <unk_x> filler tokens, before super().__init__.
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Without the sentencepiece model file we cannot regenerate a slow tokenizer.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask over ``seq`` marking special-token ids with 1."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_a, token_ids_b=None, already_has_special_tokens=False):
        """Return a special-token mask, accounting for the EOS token appended to each sequence."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a)
        elif token_ids_b is None:
            return self._special_token_mask(token_ids_a) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_b) + [1]

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Append the EOS token to build model inputs from one or two sequences."""
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece vocab file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 457 | 1 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadVaProcessor,
SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 134 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( __snake_case : list[int] ):
'''simple docstring'''
if len(__snake_case ) == 0:
return array
lowercase , lowercase = min(__snake_case ), max(__snake_case )
# Compute the variables
lowercase = _max - _min + 1
lowercase , lowercase = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
lowercase = i - _min
lowercase = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
lowercase = 0
for i in range(__snake_case ):
while holes_repeat[i] > 0:
lowercase = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase : Optional[Any] = input('Enter numbers separated by comma:\n')
_UpperCamelCase : Any = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 134 | 1 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
_lowercase : str = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
F"""{test_file} instead.""" )
_lowercase : int = components[-1]
if not test_fn.endswith('py' ):
raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
_lowercase : int = components[:-1] + [test_fn.replace('.py' , '' )]
_lowercase : Union[str, Any] = '.'.join(SCREAMING_SNAKE_CASE )
return test_module_path
def __magic_name__ ( test_file ):
    """Import and return the test module for ``test_file``.

    NOTE(review): this calls ``get_module_path``, which is not bound under
    that name in this decompiled module (every helper was renamed to
    ``__magic_name__``); the original also imported the raw file path and
    returned an unbound name — restored here from the return statement.
    """
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module
def __magic_name__ ( test_file ):
    """Return all ``*ModelTester`` classes found in the test module, sorted by name.

    NOTE(review): ``get_test_module`` is not bound under that name in this
    decompiled module; the lost local bindings are restored from usage.
    """
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('ModelTester' ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__ )
def __magic_name__ ( test_file ):
    """Return the test classes (those with a non-empty ``all_model_classes``)
    defined in the test module, sorted by name.

    NOTE(review): ``get_test_module`` is not bound under that name in this
    decompiled module; lost local bindings restored from usage.
    """
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        test_class = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class , 'all_model_classes' , [] )
        if len(model_classes ) > 0:
            test_classes.append(test_class )
    # sort with class names
    return sorted(test_classes , key=lambda x: x.__name__ )
def __magic_name__ ( test_file ):
    """Return the union of all model classes covered by the test classes in
    ``test_file``, sorted by name.

    NOTE(review): ``get_test_classes`` is not bound under that name in this
    decompiled module; lost local bindings restored from usage.
    """
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x: x.__name__ )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
_lowercase : List[Any] = test_class()
if hasattr(SCREAMING_SNAKE_CASE , 'setUp' ):
test.setUp()
_lowercase : int = None
if hasattr(SCREAMING_SNAKE_CASE , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_lowercase : List[Any] = test.model_tester.__class__
return model_tester
def __magic_name__ ( test_file , model_class ):
    """Return the test classes in ``test_file`` that cover ``model_class``.

    The decompiled original had two parameters with the same name (a
    SyntaxError) and unbound locals; names restored from usage.
    NOTE(review): ``get_test_classes`` is not bound under that name here.
    """
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x: x.__name__ )
def __magic_name__ ( test_file , model_class ):
    """Return the tester classes attached to the test classes that cover
    ``model_class`` in ``test_file``.

    The decompiled original had duplicate parameter names (a SyntaxError)
    and unbound locals; names restored from usage.
    NOTE(review): the two helpers called here are not bound under these
    names in this decompiled module.
    """
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__ )
def __magic_name__ ( test_file ):
    """Map each test class in ``test_file`` to its model-tester class.

    The decompiled original returned an unbound name; restored from usage.
    NOTE(review): the helpers called here are not bound under these names
    in this decompiled module.
    """
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def __magic_name__ ( test_file ):
    """Map each model class covered in ``test_file`` to the test classes
    that exercise it.

    The decompiled original returned an unbound name; restored from usage.
    NOTE(review): the helpers called here are not bound under these names
    in this decompiled module.
    """
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def __magic_name__ ( test_file ):
    """Map each model class covered in ``test_file`` to its tester classes.

    The decompiled original returned an unbound name; restored from usage.
    NOTE(review): the helpers called here are not bound under these names
    in this decompiled module.
    """
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return o
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return o.__name__
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ):
return [to_json(SCREAMING_SNAKE_CASE ) for x in o]
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return {to_json(SCREAMING_SNAKE_CASE ): to_json(SCREAMING_SNAKE_CASE ) for k, v in o.items()}
else:
return o
| 66 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Lazy-import structure: submodule name -> public names it provides.
# NOTE(review): the decompiled module bound this dict (and its later entries)
# to throwaway names while still referencing ``_import_structure`` at the
# bottom — restored to the standard transformers lazy-module pattern.
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 1 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
# NOTE(review): decompilation collapsed the module constants below onto the
# single name ``UpperCAmelCase`` (each assignment clobbers the previous one),
# and the loop bodies lost their subscription targets — presumably these were
# ``json_indent = 2``, ``best_score_hparams = {...}`` and
# ``org_names[m] = "facebook"/"allenai"``. Restore the names before use.
UpperCAmelCase : Optional[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
UpperCAmelCase : str = {
    # fairseq:
    'wmt19-ru-en': {'length_penalty': 1.1},
    'wmt19-en-ru': {'length_penalty': 1.15},
    'wmt19-en-de': {'length_penalty': 1.0},
    'wmt19-de-en': {'length_penalty': 1.1},
    # allenai:
    'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
    'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
    'wmt16-en-de-12-1': {'length_penalty': 0.8},
    'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
    'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
UpperCAmelCase : Optional[Any] = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    UpperCAmelCase : Optional[int] = 'facebook'
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    UpperCAmelCase : str = 'allenai'
def lowerCamelCase ( _UpperCamelCase ):
    """Rewrite fairseq BPE vocab keys to the convention used by the HF tokenizer.

    BPE continuation tokens ``"tok@@"`` become ``"tok"``; word-final tokens get
    a ``"</w>"`` suffix. The four special tokens are restored unchanged.

    The decompiled original never bound the result dict (``da`` was unbound);
    restored from the ``del da[...]`` usage.
    """
    d = _UpperCamelCase
    da = dict(
        (re.sub(r"@@$" , "" , k ), v) if k.endswith("@@" ) else (re.sub(r"$" , "</w>" , k ), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'''{k}</w>''']
        da[k] = d[k]  # restore
    return da
def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple ) -> Any:
    """Convert a fairseq FSMT checkpoint into a HF ``FSMTForConditionalGeneration``
    dump (vocab files, merges, config.json, tokenizer config and weights).

    NOTE(review): the decompiled source is badly damaged — the two parameters
    share one name (a SyntaxError), and every local binding target was
    collapsed onto ``__UpperCAmelCase``, so the names read later
    (``checkpoint_file``, ``pytorch_dump_folder_path``, ``chkpt``, ``args``,
    ``src_lang``, ``tgt_lang``, ``src_vocab``, ``tgt_vocab``, ``merges_file``,
    ``model_conf``, ``model_state_dict``, ``ignore_keys``, ``model_new`` etc.)
    are all unresolved. The code below is preserved verbatim for reference;
    it must be re-keyed against the upstream conversion script before use.
    """
    assert os.path.exists(_UpperCamelCase )
    os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
    print(f'''Writing results to {pytorch_dump_folder_path}''' )
    # handle various types of models
    __UpperCAmelCase : Tuple = basename(_UpperCamelCase )
    __UpperCAmelCase : List[str] = dirname(_UpperCamelCase )
    __UpperCAmelCase : List[Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    __UpperCAmelCase : Any = cls.hub_models()
    __UpperCAmelCase : Tuple = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
    __UpperCAmelCase : Optional[int] = """."""
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f'''using checkpoint {checkpoint_file}''' )
    __UpperCAmelCase : Union[str, Any] = hub_utils.from_pretrained(
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
    __UpperCAmelCase : int = vars(chkpt["""args"""]["""model"""] )
    __UpperCAmelCase : Optional[int] = args["""source_lang"""]
    __UpperCAmelCase : int = args["""target_lang"""]
    __UpperCAmelCase : List[str] = dirname(_UpperCamelCase )
    __UpperCAmelCase : Optional[Any] = basename(_UpperCamelCase )
    # dicts
    __UpperCAmelCase : Optional[Any] = os.path.join(_UpperCamelCase , f'''dict.{src_lang}.txt''' )
    __UpperCAmelCase : str = os.path.join(_UpperCamelCase , f'''dict.{tgt_lang}.txt''' )
    __UpperCAmelCase : Dict = Dictionary.load(_UpperCamelCase )
    # rewrite_dict_keys is the helper above (decompiled to ``lowerCamelCase`` as well).
    __UpperCAmelCase : Dict = rewrite_dict_keys(src_dict.indices )
    __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase )
    __UpperCAmelCase : Union[str, Any] = os.path.join(_UpperCamelCase , """vocab-src.json""" )
    print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
    with open(_UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    __UpperCAmelCase : Tuple = True
    for k in src_vocab.keys():
        if not k.islower():
            __UpperCAmelCase : str = False
            break
    __UpperCAmelCase : int = Dictionary.load(_UpperCamelCase )
    __UpperCAmelCase : int = rewrite_dict_keys(tgt_dict.indices )
    __UpperCAmelCase : Dict = len(_UpperCamelCase )
    __UpperCAmelCase : str = os.path.join(_UpperCamelCase , """vocab-tgt.json""" )
    print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
    with open(_UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
    # merges_file (bpecodes)
    __UpperCAmelCase : Dict = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES["""merges_file"""] )
    for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
        __UpperCAmelCase : Optional[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
        if os.path.exists(_UpperCamelCase ):
            break
    with open(_UpperCamelCase , encoding="""utf-8""" ) as fin:
        __UpperCAmelCase : List[str] = fin.read()
    __UpperCAmelCase : int = re.sub(R""" \d+$""" , """""" , _UpperCamelCase , 0 , re.M ) # remove frequency number
    print(f'''Generating {merges_file}''' )
    with open(_UpperCamelCase , """w""" , encoding="""utf-8""" ) as fout:
        fout.write(_UpperCamelCase )
    # model config
    __UpperCAmelCase : Optional[Any] = os.path.join(_UpperCamelCase , """config.json""" )
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args["bpe"]}'''
    assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
    __UpperCAmelCase : Tuple = {
        """architectures""": ["""FSMTForConditionalGeneration"""],
        """model_type""": """fsmt""",
        """activation_dropout""": args["""activation_dropout"""],
        """activation_function""": """relu""",
        """attention_dropout""": args["""attention_dropout"""],
        """d_model""": args["""decoder_embed_dim"""],
        """dropout""": args["""dropout"""],
        """init_std""": 0.02,
        """max_position_embeddings""": args["""max_source_positions"""],
        """num_hidden_layers""": args["""encoder_layers"""],
        """src_vocab_size""": src_vocab_size,
        """tgt_vocab_size""": tgt_vocab_size,
        """langs""": [src_lang, tgt_lang],
        """encoder_attention_heads""": args["""encoder_attention_heads"""],
        """encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
        """encoder_layerdrop""": args["""encoder_layerdrop"""],
        """encoder_layers""": args["""encoder_layers"""],
        """decoder_attention_heads""": args["""decoder_attention_heads"""],
        """decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
        """decoder_layerdrop""": args["""decoder_layerdrop"""],
        """decoder_layers""": args["""decoder_layers"""],
        """bos_token_id""": 0,
        """pad_token_id""": 1,
        """eos_token_id""": 2,
        """is_encoder_decoder""": True,
        """scale_embedding""": not args["""no_scale_embedding"""],
        """tie_word_embeddings""": args["""share_all_embeddings"""],
    }
    # good hparam defaults to start with
    __UpperCAmelCase : Any = 5
    __UpperCAmelCase : List[str] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        __UpperCAmelCase : Union[str, Any] = best_score_hparams[model_dir]["""length_penalty"""]
    else:
        __UpperCAmelCase : Tuple = 1.0
    print(f'''Generating {fsmt_model_config_file}''' )
    with open(_UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
    # tokenizer config
    __UpperCAmelCase : Any = os.path.join(_UpperCamelCase , _UpperCamelCase )
    __UpperCAmelCase : Optional[Any] = {
        """langs""": [src_lang, tgt_lang],
        """model_max_length""": 1_0_2_4,
        """do_lower_case""": do_lower_case,
    }
    print(f'''Generating {fsmt_tokenizer_config_file}''' )
    with open(_UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
    # model
    __UpperCAmelCase : int = chkpt["""models"""][0]
    __UpperCAmelCase : Dict = model.state_dict()
    # rename keys to start with 'model.'
    __UpperCAmelCase : Any = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
    # remove unneeded keys
    __UpperCAmelCase : List[Any] = [
        """model.model""",
        """model.encoder.version""",
        """model.decoder.version""",
        """model.encoder_embed_tokens.weight""",
        """model.decoder_embed_tokens.weight""",
        """model.encoder.embed_positions._float_tensor""",
        """model.decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
    __UpperCAmelCase : str = FSMTConfig.from_pretrained(_UpperCamelCase )
    __UpperCAmelCase : Optional[Any] = FSMTForConditionalGeneration(_UpperCamelCase )
    # check that it loads ok
    model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
    # save
    __UpperCAmelCase : int = os.path.join(_UpperCamelCase , _UpperCamelCase )
    print(f'''Generating {pytorch_weights_dump_path}''' )
    torch.save(_UpperCamelCase , _UpperCamelCase )
    print("""Conversion is done!""" )
    print("""\nLast step is to upload the files to s3""" )
    print(f'''cd {data_root}''' )
    print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCAmelCase : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 299 |
"""simple docstring"""
from __future__ import annotations
import queue
class lowerCamelCase__ :
    """Binary-tree node holding ``data`` plus ``left``/``right`` child links."""

    def __init__( self , UpperCamelCase ):
        # The decompiled source dropped the ``self.`` targets; the traversal
        # functions below read ``node.data`` / ``node.left`` / ``node.right``,
        # so restore the instance attributes here.
        self.data = UpperCamelCase
        self.left = None
        self.right = None
def lowerCamelCase ( ) -> TreeNode:
    """Interactively build a binary tree from stdin (breadth-first), stopping
    when the user enters ``N``.

    NOTE(review): decompilation destroyed the local binding names — ``q``,
    ``tree_node``, ``node_found``, ``check``, ``left_node`` and ``right_node``
    are read below but never bound (all assignments went to
    ``__UpperCAmelCase``), and the ``TreeNode`` annotation does not match the
    decompiled class name ``lowerCamelCase__``. Restore the names before use.
    """
    print("""\n********Press N to stop entering at any point of time********\n""" )
    __UpperCAmelCase : Optional[int] = input("""Enter the value of the root node: """ ).strip().lower()
    __UpperCAmelCase : queue.Queue = queue.Queue()
    __UpperCAmelCase : int = TreeNode(int(_UpperCamelCase ) )
    q.put(_UpperCamelCase )
    while not q.empty():
        __UpperCAmelCase : List[str] = q.get()
        __UpperCAmelCase : List[str] = f'''Enter the left node of {node_found.data}: '''
        __UpperCAmelCase : Tuple = input(_UpperCamelCase ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        __UpperCAmelCase : str = TreeNode(int(_UpperCamelCase ) )
        __UpperCAmelCase : List[Any] = left_node
        q.put(_UpperCamelCase )
        __UpperCAmelCase : List[str] = f'''Enter the right node of {node_found.data}: '''
        __UpperCAmelCase : Tuple = input(_UpperCamelCase ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        __UpperCAmelCase : List[str] = TreeNode(int(_UpperCamelCase ) )
        __UpperCAmelCase : Tuple = right_node
        q.put(_UpperCamelCase )
    # Unreachable in normal flow (the loop always returns); bare ``raise``
    # here would re-raise with no active exception — a decompilation artifact.
    raise
def lowerCamelCase ( _UpperCamelCase ):
    """Print a pre-order (root, left, right) traversal, comma-separated.

    Accepts any object exposing ``data``/``left``/``right`` (or None). The
    decompiled guard ``isinstance(node, node)`` raised TypeError for every
    instance; a plain None-guard restores the intended early return, and the
    recursion is rebound to this function's own name.
    """
    node = _UpperCamelCase
    if node is None:
        return
    print(node.data , end="," )
    lowerCamelCase(node.left )
    lowerCamelCase(node.right )
def lowerCamelCase ( _UpperCamelCase ):
    """Print an in-order (left, root, right) traversal, comma-separated.

    Accepts any object exposing ``data``/``left``/``right`` (or None); the
    decompiled ``isinstance(node, node)`` guard and broken recursion name
    are repaired as in the sibling traversals.
    """
    node = _UpperCamelCase
    if node is None:
        return
    lowerCamelCase(node.left )
    print(node.data , end="," )
    lowerCamelCase(node.right )
def lowerCamelCase ( _UpperCamelCase ):
    """Print a post-order (left, right, root) traversal, comma-separated.

    Accepts any object exposing ``data``/``left``/``right`` (or None); the
    decompiled ``isinstance(node, node)`` guard and broken recursion name
    are repaired as in the sibling traversals.
    """
    node = _UpperCamelCase
    if node is None:
        return
    lowerCamelCase(node.left )
    lowerCamelCase(node.right )
    print(node.data , end="," )
def lowerCamelCase ( _UpperCamelCase ):
    """Print a level-order (breadth-first) traversal, comma-separated.

    Accepts any object exposing ``data``/``left``/``right`` (or None). The
    decompiled version never bound the dequeued node; names restored.
    """
    node = _UpperCamelCase
    if node is None:
        return
    q = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end="," )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def lowerCamelCase ( _UpperCamelCase ):
    """Print a level-order traversal with one line per tree level.

    Accepts any object exposing ``data``/``left``/``right`` (or None). The
    decompiled version lost the queue/list/node bindings and re-enqueued the
    original argument instead of the next level's nodes; names restored.
    """
    node = _UpperCamelCase
    if node is None:
        return
    q = queue.Queue()
    q.put(node )
    while not q.empty():
        next_level = []
        # Drain the current level, collecting the children for the next one.
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end="," )
            if node_dequeued.left:
                next_level.append(node_dequeued.left )
            if node_dequeued.right:
                next_level.append(node_dequeued.right )
        print()
        for child in next_level:
            q.put(child )
def lowerCamelCase ( _UpperCamelCase ):
    """Print a pre-order traversal iteratively (explicit stack), comma-separated.

    Accepts any object exposing ``data``/``left``/``right`` (or None);
    decompiled guard and lost bindings repaired.
    """
    node = _UpperCamelCase
    if node is None:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end="," )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def lowerCamelCase ( _UpperCamelCase ):
    """Print an in-order traversal iteratively (explicit stack), comma-separated.

    Accepts any object exposing ``data``/``left``/``right`` (or None);
    decompiled guard and lost bindings repaired.
    """
    node = _UpperCamelCase
    if node is None:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # descend to the leftmost node first
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end="," )
        n = n.right
def lowerCamelCase ( _UpperCamelCase ):
    """Print a post-order traversal iteratively using two stacks, comma-separated.

    Accepts any object exposing ``data``/``left``/``right`` (or None);
    decompiled guard and lost bindings repaired.
    """
    node = _UpperCamelCase
    if node is None:
        return
    stacka, stacka_rev = [], []
    n = node
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stacka_rev.append(n )
    while stacka_rev:  # pop up from stack2 will be the post order
        print(stacka_rev.pop().data , end="," )
def lowerCamelCase ( s = "" , width = 5_0 , char = "*" ):
    """Center ``s`` in a banner of ``width`` chars; a bare separator when ``s`` is empty.

    The decompiled original gave all three parameters the same name (a
    SyntaxError) and never bound ``left``/``extra``; both are restored here.
    """
    if not s:
        return "\n" + width * char
    # Split the padding evenly; any odd leftover char goes on the right.
    left, extra = divmod(width - len(s ) - 2 , 2 )
    return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
UpperCAmelCase : TreeNode = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 299 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A ( unittest.TestCase ):
    """Slow integration tests for the Flax Stable Diffusion ControlNet pipeline
    (canny-edge and openpose conditioning).

    NOTE(review): decompilation damaged this class — all three methods share
    the name ``lowerCamelCase`` (only the last binding survives), the tuple
    unpackings collapse ``(model, params)`` onto one name so
    ``controlnet_params`` is unresolved, and ``from_pt=__lowerCAmelCase`` /
    ``jit=__lowerCAmelCase`` reference an undefined name (presumably ``True``).
    Restore against the upstream diffusers test before running.
    """

    def lowerCamelCase ( self : List[Any] ) -> str:
        """Free memory between tests."""
        super().tearDown()
        gc.collect()

    def lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
        """Generate with canny-edge conditioning and compare an output slice
        against reference values."""
        _lowerCamelCase , _lowerCamelCase : Optional[Any] =FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny' , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa )
        _lowerCamelCase , _lowerCamelCase : int =FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=__lowerCAmelCase , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa )
        _lowerCamelCase : str =controlnet_params
        _lowerCamelCase : Tuple ='bird'
        _lowerCamelCase : List[Any] =jax.device_count()
        _lowerCamelCase : Union[str, Any] =pipe.prepare_text_inputs([prompts] * num_samples )
        _lowerCamelCase : Optional[int] =load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
        _lowerCamelCase : Optional[Any] =pipe.prepare_image_inputs([canny_image] * num_samples )
        _lowerCamelCase : Optional[Any] =jax.random.PRNGKey(0 )
        _lowerCamelCase : Dict =jax.random.split(__lowerCAmelCase , jax.device_count() )
        # Replicate params and shard inputs across devices for pmap execution.
        _lowerCamelCase : int =replicate(__lowerCAmelCase )
        _lowerCamelCase : List[Any] =shard(__lowerCAmelCase )
        _lowerCamelCase : Any =shard(__lowerCAmelCase )
        _lowerCamelCase : Dict =pipe(
            prompt_ids=__lowerCAmelCase , image=__lowerCAmelCase , params=__lowerCAmelCase , prng_seed=__lowerCAmelCase , num_inference_steps=50 , jit=__lowerCAmelCase , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        _lowerCamelCase : Tuple =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        _lowerCamelCase : List[Any] =images[0, 253:256, 253:256, -1]
        _lowerCamelCase : List[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _lowerCamelCase : Optional[int] =jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2

    def lowerCamelCase ( self : Tuple ) -> int:
        """Generate with openpose conditioning and compare an output slice
        against reference values."""
        _lowerCamelCase , _lowerCamelCase : Union[str, Any] =FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose' , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa )
        _lowerCamelCase , _lowerCamelCase : Any =FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=__lowerCAmelCase , from_pt=__lowerCAmelCase , dtype=jnp.bfloataa )
        _lowerCamelCase : str =controlnet_params
        _lowerCamelCase : Optional[int] ='Chef in the kitchen'
        _lowerCamelCase : Any =jax.device_count()
        _lowerCamelCase : Optional[Any] =pipe.prepare_text_inputs([prompts] * num_samples )
        _lowerCamelCase : Union[str, Any] =load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
        _lowerCamelCase : Dict =pipe.prepare_image_inputs([pose_image] * num_samples )
        _lowerCamelCase : Any =jax.random.PRNGKey(0 )
        _lowerCamelCase : Dict =jax.random.split(__lowerCAmelCase , jax.device_count() )
        _lowerCamelCase : str =replicate(__lowerCAmelCase )
        _lowerCamelCase : Union[str, Any] =shard(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] =shard(__lowerCAmelCase )
        _lowerCamelCase : Tuple =pipe(
            prompt_ids=__lowerCAmelCase , image=__lowerCAmelCase , params=__lowerCAmelCase , prng_seed=__lowerCAmelCase , num_inference_steps=50 , jit=__lowerCAmelCase , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        _lowerCamelCase : List[str] =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        _lowerCamelCase : Union[str, Any] =images[0, 253:256, 253:256, -1]
        _lowerCamelCase : Union[str, Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _lowerCamelCase : Optional[int] =jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 464 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger. In the reviewed code both of these bindings used the same
# throwaway name, so the map silently clobbered the logger.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a BiT (Big Transfer) backbone.

    Fixes in this revision:
    - Base classes restored (the reviewed code inherited from the undefined
      name ``__a`` twice); ``BackboneConfigMixin`` and ``PretrainedConfig``
      are the classes imported at the top of this module.
    - The three class attributes were all assigned to the same name ``__A``,
      so ``self.layer_types`` and ``self.supported_padding`` — both read in
      ``__init__`` — never existed. Restored to the names the code references.
    - ``__init__`` declared every parameter with the same name (a
      SyntaxError); parameter names restored from the attribute assignments
      in the body.
    """

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Build the config; validates ``layer_type`` and ``global_padding``.

        :raises ValueError: if ``layer_type`` or ``global_padding`` is not one
            of the supported values.
        """
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                # Normalize to upper case so "same"/"Same" are accepted.
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # One pseudo-stage per depth entry, plus the stem.
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 309 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class a(TypedDict):
    """Typed result of the Burrows-Wheeler transform.

    The reviewed class inherited from an undefined name and replaced both
    field annotations with ``42``; restored as the ``TypedDict`` matching the
    dict literal returned by ``bwt_transform``.
    """

    # The Burrows-Wheeler transform of the input string.
    bwt_string: str
    # Index of the original string in the sorted rotation table.
    idx_original_string: int


# Name used in bwt_transform's return annotation (evaluated lazily thanks to
# ``from __future__ import annotations``); kept as an alias for compatibility.
BWTTransformDict = a
def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of *s*, starting with *s* itself.

    Renamed from the obfuscated ``_A`` — ``bwt_transform`` below calls this
    function by the name ``all_rotations``.

    :param s: string to rotate.
    :raises TypeError: if *s* is not a ``str``.
    """
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows-Wheeler transform of *s*.

    Renamed from the obfuscated ``_A`` — the ``__main__`` block below calls
    this function by the name ``bwt_transform``.

    :param s: non-empty string to transform.
    :returns: dict with the BWT string and the index of the original string
        in the sorted rotation table (needed to invert the transform).
    :raises TypeError: if *s* is not a ``str``.
    :raises ValueError: if *s* is empty.
    """
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    if not s:
        raise ValueError('The parameter s must not be empty.')
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # The BWT is the string of last characters of the sorted rotations.
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert the Burrows-Wheeler transform.

    Renamed from the obfuscated ``_A`` — the ``__main__`` block below calls
    this function by the name ``reverse_bwt``.

    :param bwt_string: the BWT string (last column of the rotation table).
    :param idx_original_string: index of the original string in the sorted
        rotation table; anything castable to ``int`` is accepted.
    :raises TypeError: if ``bwt_string`` is not a ``str`` or the index is not
        castable to ``int``.
    :raises ValueError: if ``bwt_string`` is empty or the index is out of range.
    """
    if not isinstance(bwt_string, str):
        raise TypeError('The parameter bwt_string type must be str.')
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or castable to int.'
        ) from None
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            'The parameter idx_original_string must be lower than len(bwt_string).'
        )

    # Rebuild the sorted rotation table: repeatedly prepend the BWT column
    # and re-sort; after len(bwt_string) passes each row is a full rotation.
    ordered_rotations = [''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: transform a user-supplied string, then invert it.
    # The reviewed code bound every value to the throwaway name ``a_`` while
    # the f-strings referenced ``entry_msg``/``result``/``original_string``.
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 532 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a(PipelineTool):
    """Tool that answers a natural-language question about an image with a
    ViLT visual-question-answering checkpoint.

    Fixes in this revision: the base class was the undefined
    ``_SCREAMING_SNAKE_CASE`` (restored to the imported ``PipelineTool``);
    all seven class attributes were assigned to the same name; the three
    methods shared one name and declared duplicate parameter names
    (a SyntaxError). Attribute/method names restored to the
    ``PipelineTool`` contract the body clearly targets
    (``encode``/``forward``/``decode``).
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Vision backend (PIL) is required to handle the image input.
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """Pack the image and question into model-ready tensors."""
        return self.pre_processor(image, question, return_tensors='pt')

    def forward(self, inputs):
        """Run the VQA model without tracking gradients; return raw logits."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit back to its answer label."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 532 | 1 |
from jiwer import compute_measures
import datasets
# Module-level metric metadata. The reviewed code assigned all three strings
# to the same name, clobbering each other; the class decorator and ``_info``
# below reference ``_CITATION`` / ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION``.
_CITATION = '''\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''

_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''

_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A_(datasets.Metric):
    """Word error rate (WER) metric backed by ``jiwer.compute_measures``.

    Fixes in this revision: both methods were named identically and
    ``_compute`` declared all three parameters with the same name (a
    SyntaxError). Restored to the ``datasets.Metric`` hook names
    (``_info``/``_compute``) and distinct parameter names matching the
    docstring in ``_KWARGS_DESCRIPTION``.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/jitsi/jiwer/'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Return the WER of *predictions* against *references*.

        With ``concatenate_texts`` the whole corpora are scored in one call;
        otherwise error/word counts are accumulated pair by pair.
        NOTE(review): jiwer's ``compute_measures`` takes (truth, hypothesis),
        i.e. reference first — argument order restored accordingly; confirm
        against the pinned jiwer version.
        """
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = compute_measures(reference, prediction)
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 84 |
from __future__ import annotations
def is_palindrome(n) -> bool:
    """Return True if ``str(n)`` reads the same forwards and backwards.

    Accepts ints or strings (``solution`` passes both). The reviewed body
    bound the string to a throwaway name and then compared the undefined
    name ``n``; fixed.
    """
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum all numbers below *limit* that are palindromic in base 10 and
    base 2 (Project Euler problem 36).

    Both ``is_palindrome`` and ``solution`` were named identically in the
    reviewed code, so the call below resolved to an undefined name; restored.
    """
    total = 0
    for i in range(1, limit):
        # bin(i) looks like '0b1010'; drop the '0b' prefix before checking.
        if is_palindrome(i) and is_palindrome(bin(i).split('b')[1]):
            total += i
    return total
if __name__ == "__main__":
    # Read an integer limit from stdin and print the double-base palindrome sum.
    print(solution(int(str(input().strip()))))
| 84 | 1 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn ``Bunch``-style mapping into (features, targets).

    Renamed from the obfuscated ``lowercase`` — ``main`` below calls it by
    the name ``data_handling``.
    """
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """Fit an XGBoost classifier on the given features/targets and return it.

    The reviewed signature declared both parameters with the same name
    (a SyntaxError); restored to distinct names. Also renamed from the
    obfuscated ``lowercase`` — ``main`` below calls it as ``xgboost``.
    """
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    """Train an XGBoost classifier on the iris dataset and display its
    normalized confusion matrix on the held-out test split.

    The reviewed body bound the dataset to a throwaway name while indexing
    the undefined name ``iris``; bindings restored.
    """
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris['target_names']

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap='Blues',
        normalize='true',
    )
    plt.title('Normalized Confusion Matrix - IRIS Dataset')
    plt.show()
if __name__ == "__main__":
    # Run the module doctests first, then the training/plotting demo.
    import doctest

    doctest.testmod(verbose=True)
    main()
| 710 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger. The reviewed code bound it to a throwaway name, leaving
# ``logger`` -- used further down in the conversion function -- undefined.
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
# Build the (old_name, new_name) pairs for each of the 6 encoder/decoder layers.
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
        )
    )
    rename_keys.append(
        (
            F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
            F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
        )
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
    )
    # NOTE: the per-layer ca_qpos_proj rename stays commented out on purpose;
    # only layer 0 keeps it, added explicitly in the rename_keys.extend(...) below.
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
    )
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
    )
    rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
    rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
    rename_keys.append(
        (F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ('''input_proj.weight''', '''input_projection.weight'''),
        ('''input_proj.bias''', '''input_projection.bias'''),
        ('''query_embed.weight''', '''query_position_embeddings.weight'''),
        ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
        ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
        ('''class_embed.weight''', '''class_labels_classifier.weight'''),
        ('''class_embed.bias''', '''class_labels_classifier.bias'''),
        ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
        ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
        ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
        ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
        ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
        ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
        ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
        ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
        ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
        ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
        ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
        ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
        ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
        ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
        ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
        ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
    ]
)
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    Renamed from the obfuscated ``lowercase`` — the conversion function below
    calls it by the name ``rename_key``. Raises ``KeyError`` if *old* is
    missing, which is desirable during conversion (it flags a stale mapping).
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return an ``OrderedDict`` with timm backbone keys mapped to HF naming.

    Keys containing ``backbone.0.body`` are rewritten to
    ``backbone.conv_encoder.model``; all other keys pass through unchanged.
    Renamed from the obfuscated ``lowercase`` — the conversion function below
    calls it by the name ``rename_backbone_keys``.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each fused encoder self-attention projection into q/k/v entries.

    Pops ``{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_{weight,bias}``
    and writes ``encoder.layers.{i}.self_attn.{q,k,v}_proj.{weight,bias}``.
    The reviewed code popped the fused tensors but bound each 256-row slice to
    a throwaway local instead of writing it back, silently dropping the
    attention weights; the ``state_dict`` writes are restored.
    """
    prefix = ''
    if is_panoptic:
        prefix = 'conditional_detr.'
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:256, :]
        state_dict[f'encoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:256]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[256:512, :]
        state_dict[f'encoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[256:512]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-256:, :]
        state_dict[f'encoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-256:]
def prepare_img():
    """Download the standard COCO verification image (two cats on a couch)
    used to compare the original and converted models.

    Renamed from the obfuscated ``lowercase`` — the conversion function below
    calls it by the name ``prepare_img``.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Convert a torch-hub Conditional-DETR checkpoint to the HF format,
    verify its outputs against the original model, push it to the hub and
    save it locally.

    NOTE(review): the reviewed body was heavily mangled — every binding used
    one throwaway name, the id2label comprehension cast the function argument
    instead of the loop key, and popped weights were never re-inserted into
    the state dict. Reconstructed from the names referenced throughout; the
    panoptic key-rewrite target (marked below) should be double-checked
    against the upstream conversion script.
    """
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # TODO(review): confirm this rewrite target against upstream;
                # the reviewed code dropped the key entirely here.
                state_dict["conditional_detr.model" + key[len("conditional_detr"):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point. The reviewed code bound both the parser and the parsed
    # args to the same throwaway name, leaving ``parser``/``args`` undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='conditional_detr_resnet50',
        type=str,
        help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 141 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
lowercase : str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
lowercase : ClassVar[Features] = Features({'text': Value('string' )} )
lowercase : ClassVar[Features] = Features({'labels': ClassLabel} )
lowercase : str = "text"
lowercase : str = "labels"
def __lowerCamelCase ( self , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __UpperCamelCase ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
__UpperCamelCase : Any = copy.deepcopy(self )
__UpperCamelCase : Any = self.label_schema.copy()
__UpperCamelCase : List[Any] = features[self.label_column]
__UpperCamelCase : Any = label_schema
return task_template
@property
def __lowerCamelCase ( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
} | 327 |
import unittest
from transformers import DonutProcessor
lowercase : Optional[int] = "naver-clova-ix/donut-base"
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Tuple = DonutProcessor.from_pretrained(__UpperCamelCase )
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : Tuple = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
__UpperCamelCase : int = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
__UpperCamelCase : List[str] = self.processor.tokenajson(__UpperCamelCase )
self.assertDictEqual(__UpperCamelCase , __UpperCamelCase ) | 327 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def UpperCAmelCase_ ( __lowerCAmelCase ) -> List[str]:
# getting number of pixels in the image
__lowercase , __lowercase : Union[str, Any] = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(__lowerCAmelCase ):
for j in range(__lowerCAmelCase ):
__lowercase : Any = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__lowerCAmelCase : Dict = imread("image_data/lena.jpg", 1)
# convert to its negative
__lowerCAmelCase : int = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
| 284 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """``RagRetriever`` integration tests against canonical, custom and legacy
    FAISS-backed indexes.

    NOTE(review): mechanical renaming has damaged this class — every method is
    named ``snake_case_`` (each later def shadows the earlier ones), fixture
    values are bound to a throwaway local ``__lowercase`` instead of ``self``
    attributes, several lines use annotated tuple targets
    (``a , b : T = ...``, a SyntaxError), and the helper names the methods call
    (``self.tmpdirname``, ``self.retrieval_vector_size``,
    ``self.get_dpr_tokenizer()`` …) are therefore never defined. The comments
    below record the evident intent of each method; restore the original
    identifiers before running.
    """

    # setUp: create a temp dir and write minimal DPR (WordPiece) and BART
    # (BPE) tokenizer fixture files into it; also meant to record the
    # retrieval vector size (8) on ``self``.
    def snake_case_ ( self : Optional[int] ):
        __lowercase : Dict = tempfile.mkdtemp()
        __lowercase : Tuple = 8
        # DPR tok
        __lowercase : Optional[int] = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        __lowercase : Optional[Any] = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
        # NOTE(review): ``_snake_case`` is not defined in this method.
        os.makedirs(_snake_case , exist_ok=_snake_case )
        __lowercase : str = os.path.join(_snake_case , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        # BART tok
        __lowercase : List[str] = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        __lowercase : Optional[Any] = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
        __lowercase : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        __lowercase : Any = {'''unk_token''': '''<unk>'''}
        __lowercase : int = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
        os.makedirs(_snake_case , exist_ok=_snake_case )
        __lowercase : List[Any] = os.path.join(_snake_case , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
        __lowercase : Tuple = os.path.join(_snake_case , BART_VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(_snake_case ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(_snake_case ) )

    # Fixture accessor: DPR question-encoder tokenizer from the temp dir.
    def snake_case_ ( self : List[Any] ):
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    # Fixture accessor: DPR context-encoder tokenizer from the temp dir.
    def snake_case_ ( self : Dict ):
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )

    # Fixture accessor: BART (generator) tokenizer from the temp dir.
    def snake_case_ ( self : Optional[int] ):
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )

    # tearDown: remove the fixture directory.
    def snake_case_ ( self : str ):
        shutil.rmtree(self.tmpdirname )

    # Two-document dummy dataset with a flat inner-product FAISS index on the
    # "embeddings" column (doc "1" has the larger embedding norm).
    def snake_case_ ( self : Tuple ):
        __lowercase : Dict = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset

    # Retriever backed by the canonical HF index; ``load_dataset`` is patched
    # to return the dummy dataset.
    def snake_case_ ( self : Union[str, Any] ):
        __lowercase : Union[str, Any] = self.get_dummy_dataset()
        __lowercase : Optional[int] = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
            __lowercase : List[Any] = dataset
            __lowercase : str = RagRetriever(
                _snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    # Retriever backed by a custom HF index, either kept in memory or first
    # saved to disk (index + dataset) depending on ``from_disk``.
    def snake_case_ ( self : int , _snake_case : bool ):
        __lowercase : Dict = self.get_dummy_dataset()
        __lowercase : List[Any] = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
        if from_disk:
            __lowercase : List[Any] = os.path.join(self.tmpdirname , '''dataset''' )
            __lowercase : Union[str, Any] = os.path.join(self.tmpdirname , '''index.faiss''' )
            dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
            dataset.drop_index('''embeddings''' )
            dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
            del dataset
            __lowercase : Optional[Any] = RagRetriever(
                _snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            __lowercase : Optional[Any] = RagRetriever(
                _snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _snake_case ) , )
        return retriever

    # Retriever backed by the legacy index format: a pickled FAISS index plus
    # a pickled id->(text, title) passages mapping.
    def snake_case_ ( self : str ):
        __lowercase : List[str] = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''text''': ['''foo''', '''bar'''],
                '''title''': ['''Foo''', '''Bar'''],
                '''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
        __lowercase : Optional[int] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
        dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
        pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
        __lowercase : str = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
        __lowercase : Dict = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
        pickle.dump(_snake_case , open(_snake_case , '''wb''' ) )
        __lowercase : List[str] = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
        __lowercase : Optional[int] = RagRetriever(
            _snake_case , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever

    # retrieve() against the canonical index: shapes, doc dict keys, and the
    # inner-product ranking (query of +1s matches doc "1", of -1s doc "0").
    def snake_case_ ( self : Optional[Any] ):
        __lowercase : List[str] = 1
        __lowercase : Tuple = self.get_dummy_canonical_hf_index_retriever()
        __lowercase : List[Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __lowercase , __lowercase , __lowercase : str = retriever.retrieve(_snake_case , n_docs=_snake_case )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(_snake_case ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , _snake_case )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # save_pretrained / from_pretrained round-trip for the canonical index.
    def snake_case_ ( self : int ):
        __lowercase : int = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
                __lowercase : Optional[Any] = self.get_dummy_dataset()
                retriever.save_pretrained(_snake_case )
                __lowercase : str = RagRetriever.from_pretrained(_snake_case )
                self.assertIsInstance(_snake_case , _snake_case )
                __lowercase : List[str] = np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
                __lowercase : Union[str, Any] = retriever.retrieve(_snake_case , n_docs=1 )
                self.assertTrue(out is not None )

    # retrieve() against the in-memory custom index (same ranking checks).
    def snake_case_ ( self : str ):
        __lowercase : List[str] = 1
        __lowercase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
        __lowercase : Optional[Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __lowercase , __lowercase , __lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=_snake_case )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(_snake_case ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , _snake_case )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # save/load round-trip for the in-memory custom index.
    def snake_case_ ( self : Any ):
        __lowercase : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(_snake_case )
            __lowercase : Optional[Any] = RagRetriever.from_pretrained(_snake_case )
            self.assertIsInstance(_snake_case , _snake_case )
            __lowercase : Optional[Any] = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            __lowercase : List[Any] = retriever.retrieve(_snake_case , n_docs=1 )
            self.assertTrue(out is not None )

    # retrieve() against the custom index loaded from disk.
    def snake_case_ ( self : List[Any] ):
        __lowercase : Any = 1
        __lowercase : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
        __lowercase : Any = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __lowercase , __lowercase , __lowercase : Union[str, Any] = retriever.retrieve(_snake_case , n_docs=_snake_case )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(_snake_case ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''id'''] ) , _snake_case )
        self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # save/load round-trip for the on-disk custom index.
    def snake_case_ ( self : Any ):
        __lowercase : str = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(_snake_case )
            __lowercase : Optional[int] = RagRetriever.from_pretrained(_snake_case )
            self.assertIsInstance(_snake_case , _snake_case )
            __lowercase : Union[str, Any] = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            __lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=1 )
            self.assertTrue(out is not None )

    # retrieve() against the legacy index; note the legacy doc dicts expose
    # only ''text''/''title'' and rank ''bar''/''foo'' accordingly.
    def snake_case_ ( self : Tuple ):
        __lowercase : Optional[int] = 1
        __lowercase : str = self.get_dummy_legacy_index_retriever()
        __lowercase : Tuple = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __lowercase , __lowercase , __lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=_snake_case )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(_snake_case ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
        self.assertEqual(len(doc_dicts[0]['''text'''] ) , _snake_case )
        self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    # save/load round-trip for the legacy index.
    def snake_case_ ( self : Union[str, Any] ):
        __lowercase : Tuple = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(_snake_case )
            __lowercase : Tuple = RagRetriever.from_pretrained(_snake_case )
            self.assertIsInstance(_snake_case , _snake_case )
            __lowercase : Optional[int] = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            __lowercase : Tuple = retriever.retrieve(_snake_case , n_docs=1 )
            self.assertTrue(out is not None )

    # __call__: without return_tensors the outputs are lists / np.ndarray,
    # with return_tensors='pt' they are torch tensors.
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def snake_case_ ( self : Optional[Any] ):
        import torch

        __lowercase : Tuple = 1
        __lowercase : Any = self.get_dummy_canonical_hf_index_retriever()
        __lowercase : str = [[5, 7], [10, 11]]
        __lowercase : List[Any] = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __lowercase : Any = retriever(_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case )
        __lowercase , __lowercase , __lowercase : Tuple = (
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(_snake_case , _snake_case )
        self.assertIsInstance(_snake_case , _snake_case )
        self.assertIsInstance(_snake_case , np.ndarray )
        __lowercase : Optional[Any] = retriever(
            _snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case , return_tensors='''pt''' , )
        __lowercase , __lowercase , __lowercase , __lowercase : Optional[Any] = (  # noqa: F841
            out['''context_input_ids'''],
            out['''context_attention_mask'''],
            out['''retrieved_doc_embeds'''],
            out['''doc_ids'''],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(_snake_case , torch.Tensor )
        self.assertIsInstance(_snake_case , torch.Tensor )
        self.assertIsInstance(_snake_case , torch.Tensor )

    # With a context-encoder tokenizer attached, __call__ additionally returns
    # tokenized_doc_ids / tokenized_doc_attention_mask (6 keys total).
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def snake_case_ ( self : List[Any] ):
        __lowercase : Tuple = self.get_dpr_ctx_encoder_tokenizer()
        __lowercase : str = 1
        __lowercase : int = self.get_dummy_custom_hf_index_retriever(from_disk=_snake_case )
        retriever.set_ctx_encoder_tokenizer(_snake_case )
        __lowercase : Tuple = [[5, 7], [10, 11]]
        __lowercase : int = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        __lowercase : Any = retriever(_snake_case , _snake_case , prefix=retriever.config.generator.prefix , n_docs=_snake_case )
        self.assertEqual(
            len(_snake_case ) , 6 )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _snake_case )  # check for doc token related keys in dictionary.
| 284 | 1 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
if not (isinstance(__lowerCAmelCase , __lowerCAmelCase ) and isinstance(__lowerCAmelCase , __lowerCAmelCase )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
lowercase_ = len(__lowerCAmelCase )
lowercase_ = len(__lowerCAmelCase )
lowercase_ = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
lowercase_ = 0
lowercase_ = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
lowercase_ = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
lowercase_ = i
lowercase_ = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 567 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def _SCREAMING_SNAKE_CASE () -> Tuple:
'''simple docstring'''
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""-m""" , """--pretrained_model_name_or_path""" , type=__lowerCAmelCase , default=__lowerCAmelCase , required=__lowerCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , )
parser.add_argument(
"""-c""" , """--caption""" , type=__lowerCAmelCase , default="""robotic cat with wings""" , help="""Text used to generate images.""" , )
parser.add_argument(
"""-n""" , """--images_num""" , type=__lowerCAmelCase , default=4 , help="""How much images to generate.""" , )
parser.add_argument(
"""-s""" , """--seed""" , type=__lowerCAmelCase , default=42 , help="""Seed for random process.""" , )
parser.add_argument(
"""-ci""" , """--cuda_id""" , type=__lowerCAmelCase , default=0 , help="""cuda_id.""" , )
lowercase_ = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE(imgs, rows, cols):
    """Paste ``rows * cols`` equally-sized PIL images into one grid image.

    Raises:
        ValueError: if ``len(imgs)`` does not equal ``rows * cols``.
    """
    # The obfuscated original declared three parameters with the same name
    # (a SyntaxError); names restored from the body's references.
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    # All images are assumed to share the first image's size — TODO confirm.
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        # Fill row-major: column = i % cols, row = i // cols.
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def _SCREAMING_SNAKE_CASE(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    """Run the Stable Diffusion pipeline and arrange the outputs in a grid.

    Returns:
        (grid, images): the pasted grid image and the list of individual images.
    """
    # The obfuscated original declared duplicate parameter names (a
    # SyntaxError); names restored from the keyword arguments used below.
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    # NOTE(review): ``image_grid`` must exist at module scope; in this file
    # the grid helper was renamed by obfuscation — restore its name before
    # running.
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
# NOTE(review): obfuscation bound every result to ``UpperCAmelCase`` while the
# following lines keep reading the intended names (``args``, ``tokenizer``,
# ``unet``, ``pipeline`` …), and the helper functions of this module were all
# renamed to ``_SCREAMING_SNAKE_CASE`` — so ``parse_args`` and
# ``generate_images`` are undefined here. Restore the original identifiers
# before running.
UpperCAmelCase : str = parse_args()
# Load models and create wrapper for stable diffusion
UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
UpperCAmelCase : Union[str, Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
UpperCAmelCase : Optional[Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
UpperCAmelCase : Dict = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
UpperCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Safety checker replaced by a pass-through (returns images, flags nothing).
UpperCAmelCase : List[str] = lambda images, clip_input: (images, False)
# Prefer a Neural Compressor tuned UNet checkpoint if one exists on disk;
# otherwise move the stock UNet to the requested CUDA device.
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    UpperCAmelCase : Any = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    UpperCAmelCase : Any = unet.to(torch.device("cuda", args.cuda_id))
UpperCAmelCase : Optional[int] = pipeline.to(unet.device)
UpperCAmelCase , UpperCAmelCase : Tuple = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the grid plus each individual image under a caption-derived directory.
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
UpperCAmelCase : str = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 567 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Map of submodule -> public names, consumed by ``_LazyModule`` below so that
# heavy framework code is only imported on first attribute access.
# (The obfuscated original bound the dict and every backend list to the same
# throwaway name ``a__``, so the lists were never attached and
# ``_import_structure`` was undefined at the point of use.)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy (the original bound the proxy to a
    # throwaway name, which leaves the module itself unmodified).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
def _UpperCAmelCase ( a : int = 1000 ):
snake_case__ , snake_case__ = 1, 1
snake_case__ = 2
while True:
snake_case__ = 0
snake_case__ = fa + fa
snake_case__ , snake_case__ = fa, f
index += 1
for _ in str(a ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
    # The obfuscated original called the nonexistent name ``solution``;
    # ``input()`` already returns a str, so the old ``str(...)`` wrapper was
    # redundant.
    print(_UpperCAmelCase(int(input().strip())))
| 99 | 0 |
'''simple docstring'''
def A_(A: int = 50_000_000) -> int:
    """Project Euler 87: count distinct numbers below ``A`` expressible as
    ``p**2 + q**3 + r**4`` with ``p``, ``q``, ``r`` prime.
    """
    # 28 == 2**2 + 2**3 + 2**4 is the smallest expressible number.
    if A <= 28:
        return 0
    # Largest prime whose square can still contribute: the cube + fourth
    # power add at least 2**3 + 2**4 == 24.
    prime_limit = int((A - 24) ** 0.5)
    # Sieve of Eratosthenes up to prime_limit.
    is_prime = [True] * (prime_limit + 1)
    is_prime[0] = is_prime[1] = False
    for candidate in range(2, int(prime_limit ** 0.5) + 1):
        if is_prime[candidate]:
            for multiple in range(candidate * candidate, prime_limit + 1, candidate):
                is_prime[multiple] = False
    primes = [i for i, flag in enumerate(is_prime) if flag]
    # ``primes`` is sorted ascending, so breaking out of the inner loops on
    # overflow is safe. (The original iterated a *set* — where ``break`` could
    # skip smaller primes depending on hash order — and, worse, added the
    # limit ``A`` to the result set instead of the computed total.)
    expressible = set()
    for p in primes:
        square = p * p
        if square + 24 >= A:
            break
        for q in primes:
            cube = q ** 3
            if square + cube + 16 >= A:  # 16 == 2**4, the smallest fourth power
                break
            for r in primes:
                total = square + cube + r ** 4
                if total >= A:
                    break
                expressible.add(total)
    return len(expressible)
if __name__ == "__main__":
    # The obfuscated original called the nonexistent name ``solution``.
    print(f"""{A_() = }""")
| 3 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Configuration holder used by the LeViT image-processing tests.

    Stores the knobs a ``LevitImageProcessor`` is created with and can emit
    them as the kwargs dict the processor constructor expects.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # read-only default, never mutated
        image_std=[0.5, 0.5, 0.5],   # read-only default, never mutated
    ):
        # NOTE(review): the obfuscated original declared every parameter as
        # ``lowercase_`` (duplicate parameter names are a SyntaxError) and
        # bound the values to locals, so the attributes read by the method
        # below were never set; the assignments restore the evident intent.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 18}
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict for constructing a ``LevitImageProcessor``.

        (Name restored from the sibling test class, which calls
        ``prepare_image_processor_dict`` on this tester.)
        """
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ):
    """LevitImageProcessor test-suite: attribute presence, from_dict handling,
    and PIL / numpy / torch batch processing shapes.

    NOTE(review): obfuscation damage — every method is named
    ``__lowerCAmelCase`` (later defs shadow earlier ones, and the leading
    double underscore also triggers name mangling), setUp binds the tester to
    a throwaway local instead of ``self.image_processor_tester``, the class
    attribute holding the processor class was renamed to ``lowercase__``
    although the tests read ``self.image_processing_class``, and several
    assertions reference the undefined name ``lowercase_``. The comments below
    record the evident intent; restore the original identifiers before
    running.
    """

    lowercase__ = LevitImageProcessor if is_vision_available() else None

    # setUp: build the config tester (intended: self.image_processor_tester).
    def __lowerCAmelCase ( self : List[str] ):
        lowerCAmelCase__ : Optional[Any] = LevitImageProcessingTester(self )

    # Property exposing the tester's kwargs dict for the processor constructor.
    @property
    def __lowerCAmelCase ( self : Any ):
        return self.image_processor_tester.prepare_image_processor_dict()

    # The processor must expose all the expected config attributes.
    def __lowerCAmelCase ( self : str ):
        lowerCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase_ ,'''image_mean''' ) )
        self.assertTrue(hasattr(lowercase_ ,'''image_std''' ) )
        self.assertTrue(hasattr(lowercase_ ,'''do_normalize''' ) )
        self.assertTrue(hasattr(lowercase_ ,'''do_resize''' ) )
        self.assertTrue(hasattr(lowercase_ ,'''do_center_crop''' ) )
        self.assertTrue(hasattr(lowercase_ ,'''size''' ) )

    # from_dict must honour defaults and accept size/crop_size overrides.
    def __lowerCAmelCase ( self : Any ):
        lowerCAmelCase__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size ,{'''shortest_edge''': 1_8} )
        self.assertEqual(image_processor.crop_size ,{'''height''': 1_8, '''width''': 1_8} )
        lowerCAmelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ,crop_size=8_4 )
        self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2} )
        self.assertEqual(image_processor.crop_size ,{'''height''': 8_4, '''width''': 8_4} )

    # Intentionally empty placeholder in the upstream suite.
    def __lowerCAmelCase ( self : Union[str, Any] ):
        pass

    # PIL inputs: single image and batch must come out at crop_size.
    def __lowerCAmelCase ( self : Optional[int] ):
        # Initialize image_processing
        lowerCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_ ,Image.Image )
        # Test not batched input
        lowerCAmelCase__ : Any = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
        # Test batched
        lowerCAmelCase__ : Optional[int] = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)

    # numpy inputs: same shape expectations as the PIL case.
    def __lowerCAmelCase ( self : Union[str, Any] ):
        # Initialize image_processing
        lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,numpify=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_ ,np.ndarray )
        # Test not batched input
        lowerCAmelCase__ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
        # Test batched
        lowerCAmelCase__ : Union[str, Any] = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)

    # torch inputs: same shape expectations as the PIL case.
    def __lowerCAmelCase ( self : int ):
        # Initialize image_processing
        lowerCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase_ ,torchify=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_ ,torch.Tensor )
        # Test not batched input
        lowerCAmelCase__ : Dict = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
        # Test batched
        lowerCAmelCase__ : Optional[int] = image_processing(lowercase_ ,return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,)
| 450 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __A(tf_checkpoint_path: str, rembert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow RemBERT checkpoint to a PyTorch state dict file.

    The obfuscated original declared three parameters all named ``a_`` (a
    SyntaxError); names restored from the argparse call site below.
    """
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path) | 101 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowercase :
'''simple docstring'''
__lowerCAmelCase = 42
__lowerCAmelCase = 42
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
__a : list[list[Edge]] = [[] for _ in range(_UpperCAmelCase )]
__a : Dict = size
def __getitem__( self , _UpperCAmelCase ):
return iter(self._graph[vertex] )
@property
def _lowerCamelCase ( self ):
return self._size
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if weight not in (0, 1):
raise ValueError('''Edge weight must be either 0 or 1.''' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('''Vertex indexes must be in [0; size).''' )
self._graph[from_vertex].append(Edge(_UpperCAmelCase , _UpperCAmelCase ) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[str] = deque([start_vertex] )
__a : list[int | None] = [None] * self.size
__a : int = 0
while queue:
__a : Any = queue.popleft()
__a : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__a : List[str] = current_distance + edge.weight
__a : str = distances[edge.destination_vertex]
if (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and new_distance >= dest_vertex_distance
):
continue
__a : str = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('''No path from start_vertex to finish_vertex.''' )
return distances[finish_vertex]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script. (Stray dataset
    # separator tokens after ``testmod()`` were a SyntaxError; removed.)
    import doctest

    doctest.testmod()
# BUG FIX: all three module constants were assigned to one rebound name while the
# function below references DOOMSDAY_LEAP / DOOMSDAY_NOT_LEAP / WEEK_DAY_NAMES, and
# the `Union[str, Any]` annotations referenced names that are never imported.
# Doomsday date (day-of-month whose weekday equals the year's doomsday) per month,
# for leap and non-leap years respectively.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
# Weekday index (0 = Sunday) to name.
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}
def _lowercase(year: int, month: int, day: int) -> str:
    """Return the week-day name of a Gregorian date using Conway's Doomsday rule.

    BUG FIXES: the signature previously declared the same parameter name three
    times (a SyntaxError) while the body read ``year``/``month``/``day``, and the
    leap-year test compared ``year % 400 == 0`` where the rule requires ``!= 0``
    (century years divisible by 400 ARE leap years).

    Raises:
        AssertionError: if the date components are out of range.
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    # Anchor day of the century (cycles with period 4 centuries).
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # The year's doomsday (weekday shared by all per-month doomsday dates).
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # Per-month doomsday date; a year is NOT leap when it isn't divisible by 4,
    # or when it is a century year not divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest

    doctest.testmod()
| 20 |
'''simple docstring'''
def A_(snake_case=1000):
    """Return the sum of the decimal digits of ``2 ** snake_case`` (Project Euler 16).

    BUG FIX: the body previously ignored its parameter and read an undefined
    global ``power``, raising NameError unless a global of that name happened
    to exist.

    Args:
        snake_case: the exponent of two (defaults to 1000, the Euler problem).

    Returns:
        int: sum of digits of 2 ** snake_case.
    """
    return sum(int(digit) for digit in str(2**snake_case))
if __name__ == "__main__":
    # BUG FIX: the script previously rebound the function name `A_` to the
    # parsed input and called an undefined `solution(power)`.
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = A_(power)
    print("Sum of the digits is: ", result)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : str = {
'''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['''AlbertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ['''AlbertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 705 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# BUG FIX: all four constants were previously assigned to one rebound name ``a``
# (with annotations referencing the never-imported ``Any``), while the tokenizer
# class below references VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and the module-level ``logger``.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

# Maximum input length (in tokens) supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a dict mapping every utf-8 byte value (0-255) to a printable unicode
    character, as used by byte-level BPE.

    Printable latin-1 bytes map to themselves; the remaining (control/whitespace)
    bytes map to code points starting at 256 so no byte maps to a character the
    BPE merges could confuse with whitespace.

    BUG FIX: the locals were previously all collapsed to one rebound name, so the
    function referenced undefined ``bs`` and an undefined loop value.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            # Shift unprintable bytes into the 256+ range to keep them printable.
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols).

    BUG FIX: this function was previously given the same obfuscated name as
    ``bytes_to_unicode`` above (shadowing it), while the tokenizer class calls
    it as ``get_pairs``, which was never defined.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _UpperCamelCase(__UpperCamelCase):
    """Byte-level BPE tokenizer for Longformer (same scheme as GPT-2/RoBERTa).

    BUG FIXES: the original ``__init__`` declared the same parameter name for
    every argument (a SyntaxError), every method was named ``A__`` (each
    shadowing the previous), instance attributes such as ``self.encoder`` were
    stored in a rebound local instead of on ``self``, and the merge-sorting
    lambda read ``kv`` while naming its parameter differently. Method names are
    restored to the `PreTrainedTokenizer` overrides the base class dispatches to.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair-encoding merges to ``token`` and return the space-joined result."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab; unknown tokens map to unk."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of BPE tokens back into a single decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory`` and return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Build model inputs with special tokens: ``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_a_pair is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_pair=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_pair, already_has_special_tokens=True
            )
        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_pair)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_pair=None):
        """Longformer does not use token type ids; return an all-zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is tokenized like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 422 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# BUG FIX: all constants below were assigned to one rebound name (annotated with
# the never-imported ``List``/``Any``); restore the canonical names the rest of
# the file's doc machinery refers to.

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """Convolution + batch-norm + optional activation, the basic ResNet building block.

    BUG FIXES: the ``__init__`` previously declared duplicate parameter names
    (a SyntaxError), attributes were stored in a local instead of on ``self``,
    the forward pass was named ``_A`` (so ``nn.Module.__call__`` could not find
    it), and ``nn.Convad``/``nn.BatchNormad`` are not torch APIs
    (``Conv2d``/``BatchNorm2d``).
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # `ACTaFN` is the activation lookup imported at the top of this file.
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet stem: a single aggressive 7x7 stride-2 convolution followed by max pooling.

    BUG FIXES: attributes were previously stored in a rebound local instead of
    on ``self``, the forward pass was named ``_A``, and ``nn.MaxPoolad`` is not
    a torch API (``MaxPool2d``).
    """

    def __init__(self, config):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Remembered so forward() can validate incoming pixel values.
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.'
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """1x1 convolution shortcut used to project the residual to the right
    shape/stride when it cannot be added to the main branch directly.

    BUG FIXES: duplicate ``_A`` parameters (SyntaxError), attributes stored in a
    local instead of on ``self``, forward named ``_A``, and non-existent
    ``nn.Convad``/``nn.BatchNormad`` APIs.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """Classic ResNet residual layer: two 3x3 convolutions plus a shortcut.

    BUG FIXES: duplicate parameter names, attributes stored in locals instead of
    on ``self``, and the forward pass named ``_A``.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        # Project the residual only when the shapes would otherwise not match.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            # No activation on the second conv: it is applied after the residual add.
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """Bottleneck residual layer: 1x1 reduce -> 3x3 -> 1x1 expand, plus a shortcut.

    The first 1x1 convolution reduces the channel count by ``reduction`` (4 by
    default) to keep the 3x3 convolution cheap.

    BUG FIXES: duplicate parameter names, attributes stored in locals instead of
    on ``self``, and the forward pass named ``_A``.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            # No activation on the last conv: it is applied after the residual add.
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """A ResNet stage: ``depth`` stacked residual layers, the first of which
    downsamples (stride 2 by default).

    BUG FIXES: duplicate parameter names, the layer stack stored in a local
    instead of ``self.layers``, and the forward pass named ``_A``.
    """

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    """Stack of ResNet stages; optionally collects the hidden state after each stage.

    BUG FIXES: the stage list was stored in a local instead of ``self.stages``,
    the zipped channel pairs were never used in the loop, and the forward pass
    was named ``_A``.
    """

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """Abstract base class handling weight initialization and a simple interface
    for downloading and loading pretrained ResNet models.

    BUG FIXES: the base class and class attributes previously used undefined
    obfuscated names; restored to the `PreTrainedModel` imported at the top of
    the file and the attribute names the transformers loading machinery reads.
    """

    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Kaiming-initialize convolutions; unit-scale, zero-shift norm layers."""
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder supports gradient checkpointing.
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
# BUG FIX: both docstring constants were assigned to one rebound obfuscated name;
# restore the canonical names used by the model docstring decorators.
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
# NOTE: the docstring-injection decorators were dropped because their arguments
# referenced names that are undefined in this file; they only modify __doc__.
class ResNetModel(ResNetPreTrainedModel):
    """The bare ResNet model outputting raw features without any specific head on top."""

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
# NOTE: the docstring-injection decorators were dropped because their arguments
# referenced names that are undefined in this file; they only modify __doc__.
class ResNetForImageClassification(ResNetPreTrainedModel):
    """ResNet Model with an image classification head on top (a linear layer on
    top of the pooled features), e.g. for ImageNet."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        """Compute logits (and, when ``labels`` is given, the matching loss).

        The loss type is inferred from ``config.problem_type`` or, when unset,
        from the number of labels and their dtype.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
# NOTE: the docstring-injection decorators were dropped because their arguments
# referenced names that are undefined in this file; they only modify __doc__.
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    """ResNet backbone, to be used with frameworks like DETR and MaskFormer."""

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        """Return the feature maps of the stages listed in ``self.out_features``."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        # Always request hidden states internally: the per-stage feature maps
        # below are selected from them.
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 148 |
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase__ : Optional[int] ='''docs/source/en/_toctree.yml'''
def clean_doc_toc(doc_list):
    """Clean one section of the doc table of contents: deduplicate entries,
    sort alphabetically by title, and keep the "Overview" page first.

    BUG FIXES: locals were previously collapsed into one rebound name (so
    ``counts``/``overview_doc``/``new_doc_list`` were undefined), the sort key
    lambda read ``s`` while naming its parameter differently, the filter tested
    ``"local" not in counts`` instead of ``"local" not in doc``, and this file's
    other functions call ``clean_doc_toc`` which was never defined.

    Raises:
        ValueError: if a duplicated `local` maps to several titles, or there is
            more than one overview doc.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.'
            )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in doc or counts[doc['local']] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.')
    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    """Check that the Schedulers section of the doc TOC is clean; fix it in
    place when ``overwrite`` is True, otherwise raise on inconsistency.

    BUG FIXES: the function previously tried to ``open()`` its boolean
    ``overwrite`` flag as a file, stored every local in one rebound name, and
    was not bound to the ``check_scheduler_doc`` name the ``__main__`` block
    calls.
    """
    # Inlined TOC path: the module-level constant name is unreliable in this file.
    toc_path = 'docs/source/en/_toctree.yml'
    with open(toc_path, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(toc_path, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.'
            )
def __lowercase ( a__=False ) -> Union[str, Any]:
    """Validate (and optionally rewrite) the "Pipelines" section of the docs toctree.

    Walks to the API > Pipelines section, normalizes each pipeline's sub-section
    and the overall list with ``clean_doc_toc``, then rewrites the file or raises.

    NOTE(review): same obfuscation damage as the scheduler checker above — all
    assignments target ``__SCREAMING_SNAKE_CASE`` while later lines read
    ``content``, ``api_doc``, ``pipeline_idx``, ``overwrite``, ``diff`` etc.,
    and ``a__`` doubles as overwrite flag and file path.
    """
    with open(a__ , encoding='utf-8' ) as f:
        __SCREAMING_SNAKE_CASE = yaml.safe_load(f.read() )

    # Get to the API doc
    __SCREAMING_SNAKE_CASE = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    __SCREAMING_SNAKE_CASE = content[api_idx]['sections']

    # Then to the model doc
    __SCREAMING_SNAKE_CASE = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = api_doc[pipeline_idx]['sections']
    __SCREAMING_SNAKE_CASE = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            __SCREAMING_SNAKE_CASE = pipeline_doc['section']
            __SCREAMING_SNAKE_CASE = clean_doc_toc(a__ )
            if overwrite:
                __SCREAMING_SNAKE_CASE = new_sub_pipeline_doc
        new_pipeline_docs.append(a__ )

    # sort overall pipeline doc
    __SCREAMING_SNAKE_CASE = clean_doc_toc(a__ )

    if new_pipeline_docs != pipeline_docs:
        __SCREAMING_SNAKE_CASE = True
        if overwrite:
            __SCREAMING_SNAKE_CASE = new_pipeline_docs

    if diff:
        if overwrite:
            __SCREAMING_SNAKE_CASE = api_doc
            with open(a__ , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(a__ , allow_unicode=a__ ) )
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    # Entry point: optionally auto-fix the two toctree sections checked above.
    # NOTE(review): ``parser``/``args`` are read but the assignments target
    # ``lowerCAmelCase__``, and ``check_scheduler_doc``/``check_pipeline_doc``
    # are not defined under those names in this file (both checkers above are
    # named ``__lowercase``) — original identifiers were lost in obfuscation.
    lowerCAmelCase__ : str =argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    lowerCAmelCase__ : Optional[int] =parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 148 | 1 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A_ ( unittest.TestCase ):
    """Slow integration tests comparing FlaxUNet2DConditionModel output slices
    against stored reference values for Stable Diffusion v1-4 and v2 weights.

    NOTE(review): obfuscation damage — every method is named ``_snake_case``
    (only the last definition of each signature survives on the class), several
    signatures declare duplicate ``a`` parameters (a SyntaxError), and bodies
    read names (``seed``, ``shape``, ``fpaa``, ``model``, ``params``, ``image``,
    ``hidden_states``, ``latents``) that the assignments no longer bind.
    """

    def _snake_case ( self: List[str] , a: Optional[int] , a: Dict ):
        # Filename of a stored gaussian-noise fixture for a given seed/shape.
        return F'gaussian_noise_s={seed}_shape={"_".join([str(a ) for s in shape] )}.npy'

    def _snake_case ( self: List[Any] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def _snake_case ( self: Dict , a: int=0 , a: Tuple=(4, 4, 64, 64) , a: Union[str, Any]=False ):
        # Load a stored latent tensor from the Hub (bf16 when the flag is set).
        __lowerCamelCase : Tuple = jnp.bfloataa if fpaa else jnp.floataa
        __lowerCamelCase : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(a , a ) ) , dtype=a )
        return image

    def _snake_case ( self: Optional[Any] , a: List[Any]=False , a: int="CompVis/stable-diffusion-v1-4" ):
        # Instantiate the pretrained Flax UNet (optionally from the bf16 revision).
        __lowerCamelCase : Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa
        __lowerCamelCase : Optional[Any] = 'bf16' if fpaa else None

        __lowerCamelCase , __lowerCamelCase : List[str] = FlaxUNetaDConditionModel.from_pretrained(
            a , subfolder='unet' , dtype=a , revision=a )
        return model, params

    def _snake_case ( self: List[str] , a: Dict=0 , a: Optional[int]=(4, 77, 768) , a: List[Any]=False ):
        # Load stored text-encoder hidden states used as conditioning input.
        __lowerCamelCase : Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa
        __lowerCamelCase : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(a , a ) ) , dtype=a )
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
            [17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
            [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
            [3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
            # fmt: on
        ] )
    def _snake_case ( self: Optional[int] , a: Optional[Any] , a: Dict , a: str ):
        # Compare an output slice of the SD v1-4 Flax UNet (fp16 latents) against
        # stored reference values for several (seed, timestep) pairs.
        __lowerCamelCase , __lowerCamelCase : int = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=a )
        __lowerCamelCase : str = self.get_latents(a , fpaa=a )
        __lowerCamelCase : Dict = self.get_encoder_hidden_states(a , fpaa=a )

        __lowerCamelCase : Optional[Any] = model.apply(
            {'params': params} , a , jnp.array(a , dtype=jnp.intaa ) , encoder_hidden_states=a , ).sample

        assert sample.shape == latents.shape

        __lowerCamelCase : Tuple = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        __lowerCamelCase : Any = jnp.array(a , dtype=jnp.floataa )

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(a , a , atol=1e-2 )

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
            [17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
            [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
            [3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
            # fmt: on
        ] )
    def _snake_case ( self: Optional[Any] , a: List[Any] , a: Tuple , a: Dict ):
        # Same comparison for Stable Diffusion 2 (larger latents / hidden states).
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=a )
        __lowerCamelCase : Any = self.get_latents(a , shape=(4, 4, 96, 96) , fpaa=a )
        __lowerCamelCase : Dict = self.get_encoder_hidden_states(a , shape=(4, 77, 1024) , fpaa=a )

        __lowerCamelCase : int = model.apply(
            {'params': params} , a , jnp.array(a , dtype=jnp.intaa ) , encoder_hidden_states=a , ).sample

        assert sample.shape == latents.shape

        __lowerCamelCase : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
        __lowerCamelCase : List[str] = jnp.array(a , dtype=jnp.floataa )

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(a , a , atol=1e-2 )
| 230 |
def UpperCamelCase__ ( grid: list , row: int , col: int , visit: set ) -> int:
    """Count the simple paths from (row, col) to the bottom-right cell of ``grid``.

    A depth-first search with backtracking: moves in the four cardinal
    directions, never revisits a cell on the current path, and never enters
    cells marked ``1`` (obstacles). Cells marked ``0`` are free.

    :param grid: rectangular matrix of 0 (free) / 1 (blocked) cells
    :param row: current row index
    :param col: current column index
    :param visit: set of (row, col) cells already on the current path
                  (pass an empty set for the initial call)
    :return: number of distinct simple paths that reach the bottom-right cell

    Fixes vs. the previous version: the four parameters shared a single name
    (a SyntaxError), the path counter was read before being defined, and the
    recursive calls targeted an undefined name instead of this function.
    """
    row_length, col_length = len(grid), len(grid[0])
    # Dead end: out of bounds, already on the current path, or blocked.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1  # reached the target cell

    visit.add((row, col))

    count = 0
    count += UpperCamelCase__(grid, row + 1, col, visit)
    count += UpperCamelCase__(grid, row - 1, col, visit)
    count += UpperCamelCase__(grid, row, col + 1, visit)
    count += UpperCamelCase__(grid, row, col - 1, visit)

    # Backtrack so this cell may appear on other candidate paths.
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 230 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the OPT model: heavy submodules are only imported
# on first attribute access via ``_LazyModule`` at the bottom.
# NOTE(review): obfuscation damage — every structure below is bound to the same
# name ``__UpperCAmelCase`` (each assignment clobbers the previous one), and
# ``_import_structure`` read in the final line is never defined; the original
# identifiers were lost.
__UpperCAmelCase = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}

# PyTorch objects are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OPTForCausalLM',
        'OPTModel',
        'OPTPreTrainedModel',
        'OPTForSequenceClassification',
        'OPTForQuestionAnswering',
    ]

# TensorFlow objects.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']

# Flax objects.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        'FlaxOPTForCausalLM',
        'FlaxOPTModel',
        'FlaxOPTPreTrainedModel',
    ]

# Static type checkers see the real imports; at runtime the lazy module is used.
if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 379 |
'''simple docstring'''
def lowerCAmelCase_ ( ) -> int:
    """Solve Project Euler 40 using Champernowne's constant 0.123456789101112...

    Returns d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000, where d_n is
    the n-th digit of the fractional part.

    Fixes vs. the previous version: the digit list was referenced through an
    undefined name both inside the build loop and in the join.
    """
    digit_chunks = []
    i = 1
    # Appending 10**6 stringified integers yields far more than 10**6 digits,
    # so every index used below is guaranteed to be in range.
    while len(digit_chunks) < 1e6:
        digit_chunks.append(str(i))
        i += 1
    constant = "".join(digit_chunks)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
| 394 | 0 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _A ( example ):
    """Tokenize one dataset example and record its characters-per-token ratio.

    Relies on the module-level ``tokenizer`` constructed below; intended to be
    applied with ``datasets.Dataset.map``.

    :param example: dataset row with a ``"content"`` text field
    :return: dict with ``"input_ids"`` and the chars-per-token ratio

    Fixes vs. the previous version: the result dict was repeatedly rebound to a
    plain name, so ``output`` was undefined when read and returned.

    NOTE(review): the ratio's output key and ``truncation=False`` were
    reconstructed (the original passed the example itself as ``truncation``) —
    confirm against the downstream consumers of the tokenized dataset.
    """
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    # Characters per token: rough measure of tokenizer efficiency on this sample.
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
# Script body: parse arguments, load the dataset, tokenize it in parallel and
# push the result to the Hub, timing each stage.
# NOTE(review): obfuscation damage — every assignment targets ``UpperCAmelCase``
# (each clobbers the previous), while later lines read ``parser``, ``args``,
# ``tokenizer``, ``t_start``, ``ds`` and the map callable ``tokenize``; the
# original identifiers were lost.
UpperCAmelCase =HfArgumentParser(PretokenizationArguments)
UpperCAmelCase =parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    UpperCAmelCase =multiprocessing.cpu_count()
UpperCAmelCase =AutoTokenizer.from_pretrained(args.tokenizer_dir)

UpperCAmelCase =time.time()
UpperCAmelCase =load_dataset(args.dataset_name, split="train")
print(f"""Dataset loaded in {time.time()-t_start:.2f}s""")

UpperCAmelCase =time.time()
# Tokenize in parallel, dropping metadata columns that are no longer needed.
UpperCAmelCase =ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""")

UpperCAmelCase =time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 719 |
"""simple docstring"""
# Radix of the rolling hash (one slot per possible byte value).
# NOTE(review): both constants are bound to the same name ``UpperCAmelCase`` —
# the second assignment clobbers the first, and the matcher below reads
# ``alphabet_size`` / ``modulus``, which are never defined at module level.
UpperCAmelCase =256
# Modulus to hash a string
UpperCAmelCase =1_000_003
def _A ( _a : str , _a : str ):
"""simple docstring"""
A = len(_a )
A = len(_a )
if p_len > t_len:
return False
A = 0
A = 0
A = 1
# Calculating the hash of pattern and substring of text
for i in range(_a ):
A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
A = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
A = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
A = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _A ( ):
    """Smoke-test the Rabin-Karp matcher on positive and negative examples.

    NOTE(review): obfuscation damage — this calls ``rabin_karp``, which is not
    defined in this file (the matcher above is named ``_A`` and is shadowed by
    this very function), all fixture strings are bound to the single name ``A``,
    and the asserts pass the undefined name ``_a``.
    """
    # Test 1)
    A = """abc1abc12"""
    A = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    A = """alskfjaldsk23adsfabcabc"""
    assert rabin_karp(_a , _a ) and not rabin_karp(_a , _a )

    # Test 2)
    A = """ABABX"""
    A = """ABABZABABYABABX"""
    assert rabin_karp(_a , _a )

    # Test 3)
    A = """AAAB"""
    A = """ABAAAAAB"""
    assert rabin_karp(_a , _a )

    # Test 4)
    A = """abcdabcy"""
    A = """abcxabcdabxabcdabcdabcy"""
    assert rabin_karp(_a , _a )

    # Test 5) non-ASCII input
    A = """Lü"""
    A = """Lüsai"""
    assert rabin_karp(_a , _a )
    A = """Lue"""
    assert not rabin_karp(_a , _a )
    print("""Success.""" )
print("""Success.""" )
if __name__ == "__main__":
    # NOTE(review): ``test_rabin_karp`` is not defined in this file — the test
    # runner above is named ``_A``; confirm the intended entry point.
    test_rabin_karp()
| 255 | 0 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
# Emit INFO-level progress logs while converting checkpoints.
logging.set_verbosity_info()
_snake_case = logging.get_logger()
@dataclass
class a__ :
    """Tracker: records the leaf modules traversed during one forward pass.

    A forward hook is registered on every submodule; the hook appends modules
    that have no children (or are Conv2d/BatchNorm2d) to the traced list.

    NOTE(review): obfuscation damage — all three dataclass fields are bound to
    the one name ``_SCREAMING_SNAKE_CASE`` (later code reads ``module`` /
    ``traced`` / ``handles``), ``lowerCamelCase_`` used as the default factory
    is undefined (presumably ``list``), and the hook signature declares
    duplicate ``_UpperCamelCase`` parameters, which is a SyntaxError.
    """
    _SCREAMING_SNAKE_CASE : nn.Module
    _SCREAMING_SNAKE_CASE : List[nn.Module] = field(default_factory=lowerCamelCase_ )
    _SCREAMING_SNAKE_CASE : list = field(default_factory=lowerCamelCase_ )

    def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
        """Forward hook: record modules that have no submodules of their own."""
        _lowercase : str = len(list(m.modules() ) ) == 1 or isinstance(_UpperCamelCase , nn.Convad ) or isinstance(_UpperCamelCase , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(_UpperCamelCase )

    def __call__( self , _UpperCamelCase ):
        """Run one forward pass with hooks attached, then detach all hooks."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(_UpperCamelCase )
        [x.remove() for x in self.handles]
        return self

    @property
    def _lowerCamelCase ( self ):
        """Traced modules that actually carry parameters (non-empty state dict)."""
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda _UpperCamelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class a__ :
    """ModuleTransfer: copies weights from a source model into a destination
    model by tracing both with ``Tracker`` and zipping the parametrized layers.

    NOTE(review): obfuscation damage — the five dataclass fields share the one
    name ``_SCREAMING_SNAKE_CASE`` (the body reads ``src`` / ``dest`` /
    ``verbose`` / ``src_skip`` / ``dest_skip``), ``lowerCamelCase_`` as the
    default factory is undefined, and the traced lists are read through
    undefined names inside ``__call__``.
    """
    _SCREAMING_SNAKE_CASE : nn.Module
    _SCREAMING_SNAKE_CASE : nn.Module
    _SCREAMING_SNAKE_CASE : int = 0
    _SCREAMING_SNAKE_CASE : List = field(default_factory=lowerCamelCase_ )
    _SCREAMING_SNAKE_CASE : List = field(default_factory=lowerCamelCase_ )

    def __call__( self , _UpperCamelCase ):
        """Trace both models on the same input and transfer weights layer-by-layer.

        Raises when the two traces expose a different number of operations.
        """
        _lowercase : Dict = Tracker(self.dest )(_UpperCamelCase ).parametrized
        _lowercase : Tuple = Tracker(self.src )(_UpperCamelCase ).parametrized

        # Drop layer types the caller asked to skip on either side.
        _lowercase : Optional[Any] = list(filter(lambda _UpperCamelCase : type(_UpperCamelCase ) not in self.src_skip , _UpperCamelCase ) )
        _lowercase : int = list(filter(lambda _UpperCamelCase : type(_UpperCamelCase ) not in self.dest_skip , _UpperCamelCase ) )

        if len(_UpperCamelCase ) != len(_UpperCamelCase ):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(_UpperCamelCase )} operations while'''
                f''' destination module has {len(_UpperCamelCase )}.''' )

        for dest_m, src_m in zip(_UpperCamelCase , _UpperCamelCase ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'''Transfered from={src_m} to={dest_m}''' )
def _A ( snake_case , snake_case , snake_case , snake_case = True ) -> Union[str, Any]:
    """Convert one timm ResNet checkpoint to a HF ``ResNetForImageClassification``
    and optionally push model + image processor to the Hub.

    NOTE(review): obfuscation damage — all four parameters share the name
    ``snake_case`` (a SyntaxError; originally name/config/save_directory/
    push_to_hub), and the body reads names (``name``, ``from_model``,
    ``our_model``, ``module_transfer``, ``checkpoint_name``, ``x``,
    ``image_processor``) that the ``_lowercase`` assignments no longer bind.
    """
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        # Source: pretrained timm model; destination: freshly built HF model.
        _lowercase : List[Any] = timm.create_model(snake_case , pretrained=snake_case ).eval()
        _lowercase : str = ResNetForImageClassification(snake_case ).eval()
        _lowercase : Dict = ModuleTransfer(src=snake_case , dest=snake_case )
        _lowercase : Any = torch.randn((1, 3, 2_24, 2_24) )
        module_transfer(snake_case )

    # Sanity check: both models must produce the same logits on random input.
    assert torch.allclose(from_model(snake_case ) , our_model(snake_case ).logits ), "The model logits don't match the original one."

    _lowercase : Optional[Any] = F'''resnet{'-'.join(name.split('resnet' ) )}'''
    print(snake_case )

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=snake_case , )

        # we can use the convnext one
        _lowercase : Any = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=snake_case , )

        print(F'''Pushed {checkpoint_name}''' )
def _A ( snake_case , snake_case = None , snake_case = True ) -> Dict:
    """Build the ImageNet-1k label maps and per-architecture ResNet configs, then
    convert either one named checkpoint or all supported ones.

    NOTE(review): obfuscation damage — the three parameters share the name
    ``snake_case`` (a SyntaxError; originally save_directory/model_name/
    push_to_hub), and the body reads names (``num_labels``, ``idalabel``,
    ``ImageNetPreTrainedConfig``, ``names_to_config``, ``config``,
    ``expected_shape``) that the ``_lowercase`` assignments no longer bind.
    """
    _lowercase : List[str] = "imagenet-1k-id2label.json"
    _lowercase : int = 10_00
    _lowercase : int = (1, num_labels)
    _lowercase : Any = "huggingface/label-files"
    _lowercase : str = num_labels
    # id -> label mapping published in the huggingface/label-files dataset.
    _lowercase : Optional[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) )
    _lowercase : Any = {int(snake_case ): v for k, v in idalabel.items()}

    _lowercase : Optional[Any] = idalabel
    _lowercase : Tuple = {v: k for k, v in idalabel.items()}

    # Config factory with the label maps pre-bound.
    _lowercase : Any = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case )

    _lowercase : List[str] = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
    }

    if model_name:
        convert_weight_and_push(snake_case , names_to_config[model_name] , snake_case , snake_case )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(snake_case , snake_case , snake_case , snake_case )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): obfuscation damage — the assignments target ``_snake_case``
    # while later lines read ``parser``, ``args`` and ``pytorch_dump_folder_path``;
    # the original identifiers were lost.
    _snake_case = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help=(
            'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
            ' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=Path,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        default=True,
        type=bool,
        required=False,
        help='If True, push model and image processor to the hub.',
    )

    _snake_case = parser.parse_args()
    _snake_case = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 245 |
'''simple docstring'''
def _A ( snake_case , snake_case ) -> int:
if len(snake_case ) != len(snake_case ):
raise ValueError("String lengths must match!" )
_lowercase : Union[str, Any] = 0
for chara, chara in zip(snake_case , snake_case ):
if chara != chara:
count += 1
return count
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 245 | 1 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase, _lowercase ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def __UpperCamelCase ( ) -> None:
    """Exhaustively check the OR gate's truth table.

    NOTE(review): calls ``or_gate``, which is not defined in this file — the
    gate above is named ``__UpperCamelCase`` and is shadowed by this function.
    """
    assert or_gate(0, 0 ) == 0
    assert or_gate(0, 1 ) == 1
    assert or_gate(1, 0 ) == 1
    assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
    # Demonstrate the gate on all four input combinations.
    # NOTE(review): ``or_gate`` is not defined in this file — the gate above is
    # named ``__UpperCamelCase`` (and is shadowed by the checker after it).
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 713 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Pick the tensor framework string for ``return_tensors`` based on what is
# installed: torch > tf > jax.
# NOTE(review): all three branches bind the same name ``_A`` while the tests
# below read ``FRAMEWORK`` — the original constant name was lost.
if is_torch_available():
    _A : List[str] ='''pt'''
elif is_tf_available():
    _A : Tuple ='''tf'''
else:
    _A : Optional[int] ='''jax'''
class lowerCamelCase__ ( A , unittest.TestCase ):
'''simple docstring'''
A_ = ByTaTokenizer
A_ = False
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
_lowercase : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __UpperCAmelCase ( self : int , **UpperCamelCase_ : List[Any] ) -> ByTaTokenizer:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=20 , UpperCamelCase_ : Optional[int]=5 ) -> Tuple[str, list]:
'''simple docstring'''
_lowercase : Dict = []
for i in range(len(UpperCamelCase_ ) ):
try:
_lowercase : List[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowercase : Optional[Any] = list(filter(lambda UpperCamelCase_ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , UpperCamelCase_ ) )
_lowercase : List[Any] = list(filter(lambda UpperCamelCase_ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) )
if max_length is not None and len(UpperCamelCase_ ) > max_length:
_lowercase : List[Any] = toks[:max_length]
if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0:
while len(UpperCamelCase_ ) < min_length:
_lowercase : Tuple = toks + toks
# toks_str = [t[1] for t in toks]
_lowercase : Dict = [t[0] for t in toks]
# Ensure consistency
_lowercase : Any = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
if " " not in output_txt and len(UpperCamelCase_ ) > 1:
_lowercase : Any = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ )
)
if with_prefix_space:
_lowercase : Union[str, Any] = ' ' + output_txt
_lowercase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
return output_txt, output_ids
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
_lowercase : List[str] = self.ta_base_tokenizer
_lowercase : Union[str, Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowercase : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
_lowercase : Optional[int] = self.ta_base_tokenizer
_lowercase : Tuple = 'Unicode €.'
_lowercase : List[Any] = tokenizer(UpperCamelCase_ )
_lowercase : List[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : List[str] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'Unicode €.</s>' )
_lowercase : Any = tokenizer('e è é ê ë' )
_lowercase : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , UpperCamelCase_ )
# decoding
_lowercase : Tuple = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
'''simple docstring'''
_lowercase : List[Any] = self.ta_base_tokenizer
_lowercase : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowercase : Any = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
_lowercase : Dict = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
if FRAMEWORK != "jax":
_lowercase : Optional[Any] = list(batch.input_ids.numpy()[0] )
else:
_lowercase : List[str] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = self.ta_base_tokenizer
_lowercase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowercase : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , UpperCamelCase_ )
self.assertIn('attention_mask' , UpperCamelCase_ )
self.assertNotIn('decoder_input_ids' , UpperCamelCase_ )
self.assertNotIn('decoder_attention_mask' , UpperCamelCase_ )
def __UpperCAmelCase ( self : Any ) -> int:
'''simple docstring'''
_lowercase : Tuple = self.ta_base_tokenizer
_lowercase : Optional[Any] = [
'Summary of the text.',
'Another summary.',
]
_lowercase : str = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='max_length' , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
_lowercase : str = self.ta_base_tokenizer
_lowercase : str = ['A long paragraph for summarization. </s>']
_lowercase : Optional[int] = ['Summary of the text. </s>']
# fmt: off
_lowercase : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
_lowercase : Optional[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
_lowercase : Any = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch['input_ids'][0] )
self.assertEqual(UpperCamelCase_ , batch['labels'][0] )
def __UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
_lowercase : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowercase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : List[Any] = tempfile.mkdtemp()
_lowercase : Any = ' He is very happy, UNwant\u00E9d,running'
_lowercase : Union[str, Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : Optional[int] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Tuple = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
_lowercase : str = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowercase : Dict = tempfile.mkdtemp()
_lowercase : List[Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowercase : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
_lowercase : List[str] = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
_lowercase : Dict = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowercase : Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowercase : List[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowercase : int = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowercase : Tuple = json.load(UpperCamelCase_ )
_lowercase : List[Any] = [F'''<extra_id_{i}>''' for i in range(125 )]
_lowercase : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowercase : int = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(UpperCamelCase_ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowercase : Optional[Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowercase : List[str] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=UpperCamelCase_ )]
_lowercase : Tuple = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __UpperCAmelCase ( self : List[str] ) -> str:
'''simple docstring'''
_lowercase : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
_lowercase : str = tokenizer_class.from_pretrained(UpperCamelCase_ )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __UpperCAmelCase ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowercase : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowercase : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
def test_tokenizers_common_ids_setters(self):
    """Setting ``<attr>_id`` must update both the id and the token view of each special token.

    Restores the undefined ``UpperCamelCase_`` placeholders: the tokenizer under
    test, the attribute being set, ``None``, and the ``token(_id)_to_test_setters``
    pair (id 0 and its decoded token).
    """
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'''{tokenizer.__class__.__name__}'''):
            attributes_list = [
                'bos_token',
                'eos_token',
                'unk_token',
                'sep_token',
                'pad_token',
                'cls_token',
                'mask_token',
            ]
            token_id_to_test_setters = 0
            token_to_test_setters = tokenizer.convert_ids_to_tokens(
                token_id_to_test_setters, skip_special_tokens=False)
            for attr in attributes_list:
                # Clearing the id clears the token too.
                setattr(tokenizer, attr + '_id', None)
                self.assertEqual(getattr(tokenizer, attr), None)
                self.assertEqual(getattr(tokenizer, attr + '_id'), None)
                # Setting the id makes the token resolvable again.
                setattr(tokenizer, attr + '_id', token_id_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr + '_id'), token_id_to_test_setters)
            setattr(tokenizer, 'additional_special_tokens_ids', [])
            self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [])
            self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [])
            setattr(tokenizer, 'additional_special_tokens_ids', [token_id_to_test_setters])
            self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [token_to_test_setters])
            self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [token_id_to_test_setters])
| 4 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the Chinese-CLIP sub-package.  The obfuscated
# version bound everything to one placeholder name and then referenced the
# never-defined `_import_structure` at the bottom (NameError at import time);
# the standard Transformers lazy-module pattern is restored here.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Both names live in the feature-extraction module (matching the
    # TYPE_CHECKING branch below).
    _import_structure["feature_extraction_chinese_clip"] = [
        "ChineseCLIPFeatureExtractor",
        "ChineseCLIPImageProcessor",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 297 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
# Module-level logger.  NOTE(review): the same obfuscated name `lowercase_` is
# rebound at the bottom of the file, clobbering this logger — the two bindings
# should have distinct names.
lowercase_ = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder config for AudioFolder.

    The obfuscated version declared both fields as ``_a = None`` (the second
    overwrote the first); the two standard folder-builder options are restored.
    """

    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder(folder_based_builder.FolderBasedBuilder):
    """Dataset builder that loads audio files (with optional labels) from folders.

    The obfuscated version bound all five class attributes to the single name
    ``_a``, so each assignment overwrote the previous one; the canonical
    folder-based-builder attribute names are restored.
    """

    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str] = 42  # placeholder; overwritten with the real list at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


# Backward-compatible alias for the old (obfuscated) class name.
__lowerCAmelCase = AudioFolder
# File extensions recognized as audio by the AudioFolder builder.  The
# obfuscated version assigned this list to `lowercase_` and then executed
# `lowercase_ = AUDIO_EXTENSIONS` with `AUDIO_EXTENSIONS` undefined (NameError);
# the list now carries its canonical name.
AUDIO_EXTENSIONS = [
    '.aiff',
    '.au',
    '.avr',
    '.caf',
    '.flac',
    '.htk',
    '.svx',
    '.mat4',
    '.mat5',
    '.mpc2k',
    '.ogg',
    '.paf',
    '.pvf',
    '.raw',
    '.rf64',
    '.sd2',
    '.sds',
    '.ircam',
    '.voc',
    '.w64',
    '.wav',
    '.nist',
    '.wavex',
    '.wve',
    '.xi',
    '.mp3',
    '.opus',
]
# Keep the old module-level binding for backward compatibility.
lowercase_ = AUDIO_EXTENSIONS
# Attach the list to the builder class when it is available under its restored
# name (it may still carry the obfuscated name if that fix was not applied).
_audio_folder_cls = globals().get("AudioFolder")
if _audio_folder_cls is not None:
    _audio_folder_cls.EXTENSIONS = AUDIO_EXTENSIONS
| 291 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    """Config tester that additionally checks MobileViT-specific config attributes.

    The obfuscated base class ``_lowerCAmelCase`` was undefined (NameError at
    class creation); ``ConfigTester`` — imported at the top of the file — is the
    intended base, and the undefined ``_lowerCamelCase`` references are the
    freshly built config object.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''hidden_sizes'''))
        self.parent.assertTrue(hasattr(config, '''neck_hidden_sizes'''))
        self.parent.assertTrue(hasattr(config, '''num_attention_heads'''))
class MobileViTModelTester:
    """Builds small MobileViT configs/inputs and runs shape checks for the unit tests.

    The obfuscated ``__init__`` reused ``_lowerCamelCase`` for every parameter —
    a SyntaxError (duplicate argument names).  Parameter names are restored from
    the attribute assignments in the body.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random pixel inputs, optional labels, and a matching config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # The final feature map is downsampled by `output_stride` in both dims.
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        # Logits must have the right shape both with and without labels.
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MobileViT.

    The obfuscated version used undefined base names, gave every test the same
    method name (so unittest ran only the last one), and referenced the
    never-defined ``MobileViTModelTester``/``MobileViTConfigTester`` in
    ``setUp``.  Names are restored from the imports at the top of the file.
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    # The four boolean flags disable common-test features MobileViT lacks.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileViT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''MobileViT does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='''MobileViT does not output attentions''')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(_lowerCamelCase )
def prepare_img():
    """Load the standard COCO cats test fixture used by the integration tests.

    The obfuscated version stored the opened image in a placeholder variable
    and then returned the undefined name ``image`` (NameError).  The function
    is also renamed to ``prepare_img``, the name the integration tests call.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


# Backward-compatible alias for the old obfuscated name.
lowerCAmelCase = prepare_img
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the released ``apple/*mobilevit-xx-small`` checkpoints.

    The obfuscated version used one shared method name (shadowing) and the
    undefined placeholder ``_lowerCamelCase`` where ``torch_device``, the
    prepared inputs, and the expected tensors belong.
    """

    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''')
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        # With explicit target sizes the maps are resized accordingly...
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        # ...and without them the raw logits resolution is kept.
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 700 |
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
def get_matched_characters(__UpperCamelCase , __UpperCamelCase ) -> str:
__A = []
__A = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__A = int(max(0 , i - limit ) )
__A = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__UpperCamelCase )
__A = f'{_stra[0:_stra.index(__UpperCamelCase )]} {_stra[_stra.index(__UpperCamelCase ) + 1:]}'
return "".join(__UpperCamelCase )
# matching characters
__A = get_matched_characters(__UpperCamelCase , __UpperCamelCase )
__A = get_matched_characters(__UpperCamelCase , __UpperCamelCase )
__A = len(__UpperCamelCase )
# transposition
__A = (
len([(ca, ca) for ca, ca in zip(__UpperCamelCase , __UpperCamelCase ) if ca != ca] ) // 2
)
if not match_count:
__A = 0.0
else:
__A = (
1
/ 3
* (
match_count / len(__UpperCamelCase )
+ match_count / len(__UpperCamelCase )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__A = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
| 215 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the OPT sub-package.  The obfuscated version bound
# every partial structure to one placeholder and referenced the never-defined
# `_import_structure` at the bottom (NameError at import time); the standard
# Transformers lazy-module pattern is restored here.
_import_structure = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_opt'] = [
        'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OPTForCausalLM',
        'OPTModel',
        'OPTPreTrainedModel',
        'OPTForSequenceClassification',
        'OPTForQuestionAnswering',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_opt'] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_opt'] = [
        'FlaxOPTForCausalLM',
        'FlaxOPTModel',
        'FlaxOPTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 404 |
'''simple docstring'''
import os
def largest_product(grid):
    """Return the greatest product of four adjacent numbers in *grid*.

    Adjacency is checked vertically, horizontally, and along both diagonals.
    Restored name: the obfuscated ``_a`` was shadowed by a second ``def _a``
    below, and the caller referenced the then-undefined ``largest_product``.
    NOTE: the combined loop only works for an n x n grid (original Project
    Euler #11 quirk preserved, including diagonal products carrying over when
    their guard is false).
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest
def solution():
    """Read ``grid.txt`` (next to this script) and return its largest 4-in-a-row product.

    Restored names: the obfuscated version shadowed ``largest_product`` with a
    second ``def _a`` and the ``__main__`` block called the undefined
    ``solution``; ``os.path.dirname`` was also given an undefined placeholder
    instead of ``__file__``.
    """
    grid = []
    with open(os.path.dirname(__file__) + '''/grid.txt''') as file:
        for line in file:
            grid.append(line.strip('''\n''').split(''' '''))
    # Parse every cell to int before searching.
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


# Backward-compatible alias for the old obfuscated name (its final binding).
_a = solution

if __name__ == "__main__":
    print(solution())
| 404 | 1 |
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Union[str, Any] = []
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(_UpperCAmelCase ) )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(_UpperCAmelCase ) )
elif isinstance(_UpperCAmelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('''Not supported''' )
return shapes
@torch.jit.ignore
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Optional[Any] = []
for d in reversed(_UpperCAmelCase ):
idx.append(flat_idx % d )
A_ : Tuple = flat_idx // d
return tuple(reversed(_UpperCAmelCase ) )
@torch.jit.ignore
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
"""simple docstring"""
def reduce_edge_list(_UpperCAmelCase ) -> None:
A_ : Dict = True
for i in range(len(_UpperCAmelCase ) ):
A_ : Any = -1 * (i + 1)
l[reversed_idx] &= tally
A_ : Any = l[reversed_idx]
if start_edges is None:
A_ : Any = [s == 0 for s in start]
reduce_edge_list(_UpperCAmelCase )
if end_edges is None:
A_ : Union[str, Any] = [e == (d - 1) for e, d in zip(_UpperCAmelCase , _UpperCAmelCase )]
reduce_edge_list(_UpperCAmelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(_UpperCAmelCase ) == 0:
return [()]
elif len(_UpperCAmelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
A_ : List[Tuple[slice, ...]] = []
A_ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(_UpperCAmelCase , _UpperCAmelCase ):
if s == e:
path_list.append(slice(_UpperCAmelCase , s + 1 ) )
else:
break
A_ : Tuple[slice, ...] = tuple(_UpperCAmelCase )
A_ : str = len(_UpperCAmelCase )
# start == end, and we're done
if divergence_idx == len(_UpperCAmelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
A_ : Dict = start[divergence_idx]
return tuple(
path + (slice(_UpperCAmelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
A_ : Optional[Any] = end[divergence_idx]
return tuple(
path + (slice(_UpperCAmelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
A_ : Optional[Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    """Select rows [flat_start, flat_end) of *t*'s flattened leading batch dims without expanding them.

    Restored names: the obfuscated signature reused one placeholder for all four
    parameters (SyntaxError) while the body calls the real helpers.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Apply *layer* over *inputs* in chunks of *chunk_size* along the flattened batch dims.

    Restores the obfuscated duplicate parameter names (SyntaxError) and the
    three chunk writes that had degraded from slice assignments into dead
    name assignments (dict, tuple, and tensor branches).
    """
    if not (len(inputs) > 0):
        raise ValueError('''Must provide at least one input''')

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        # Size-1 leading dims are broadcast, not chunked.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(da, ds) -> None:
                for k, v in da.items():
                    if isinstance(v, dict):
                        assign(v, ds[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += ds[k]
                        else:
                            v[i : i + chunk_size] = ds[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for xa, xb in zip(out, output_chunk):
                if _add_into_out:
                    xa[i : i + chunk_size] += xb
                else:
                    xa[i : i + chunk_size] = xb
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError('''Not supported''')

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out


# Backward-compatible alias: `lowercase_`'s final module-level binding was this
# function in the obfuscated version.
lowercase_ = chunk_layer
class ChunkSizeTuner:
    """Binary-searches the largest chunk size a layer can run without a RuntimeError,
    caching the result per argument-shape signature.

    Restored names: all three methods shared the obfuscated name ``a_`` (later
    definitions shadowed earlier ones) and ``tune_chunk_size`` calls the real
    private helpers.
    """

    def __init__(
        self,
        # Default arbitrary: memory-hungry models should specify a lower value.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info('''Tuning chunk size...''')

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for aa, ab in zip(ac1, ac2):
            assert type(aa) == type(ab)
            if isinstance(aa, (list, tuple)):
                consistent &= self._compare_arg_caches(aa, ab)
            elif isinstance(aa, dict):
                # Compare dict values in key order so ordering differences don't matter.
                aa_items = [v for _, v in sorted(aa.items(), key=lambda x: x[0])]
                ab_items = [v for _, v in sorted(ab.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(aa_items, ab_items)
            else:
                consistent &= aa == ab
        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size


# Backward-compatible alias for the old obfuscated class name.
lowercase = ChunkSizeTuner
| 703 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.  NOTE(review): the same obfuscated name is rebound
# immediately below to the archive map, clobbering the logger — the two
# bindings should have distinct names (logger / CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP).
_lowerCamelCase : List[Any] = logging.get_logger(__name__)

# Map from shorthand checkpoint names to the URLs of their hosted config files,
# consumed during `from_pretrained` resolution.
_lowerCamelCase : Tuple = {
    'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
    'umberto-commoncrawl-cased-v1': (
        'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
    ),
    'umberto-wikipedia-uncased-v1': (
        'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
    ),
}
class CamembertConfig(PretrainedConfig):
    """Configuration for CamemBERT models (a RoBERTa-style architecture).

    Fixes three obfuscation artifacts: the undefined base class
    ``__UpperCAmelCase`` (``PretrainedConfig`` is imported at the top of the
    file), the ``model_type`` registry key whose attribute name was lost, and
    the ``__init__`` signature that reused ``_lowerCamelCase`` for every
    parameter (a SyntaxError) — names are restored from the attribute
    assignments in the body.
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration: declares the dynamic axes of the encoder inputs."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )


# Backward-compatible alias: the original (mangled) module bound this class to
# the same name as the config class above, shadowing it; the alias keeps the
# original final binding intact.
lowercase = CamembertOnnxConfig
| 361 | 0 |
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of ``function`` in ``[a, b]`` by bisection.

    Returns an endpoint immediately if it is already a root; raises
    ``ValueError`` when ``function(a)`` and ``function(b)`` have the same
    sign, since without a sign change bisection cannot bracket a root.

    NOTE(review): the original declared all three parameters with one shared
    name (a SyntaxError); names restored from the algorithm's structure, and
    the function name from the ``__main__`` call site below.
    """
    start = a
    end = b
    if function(a) == 0:  # one of the endpoints is already a root
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # Same sign at both endpoints: no guaranteed root in the interval.
        raise ValueError("could not find root in given interval.")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # iterate until the bracket is ~1e-7 wide
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                # Sign change in [start, mid]: shrink from the right.
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


# Backward-compatible alias for the original (mangled) function name.
A_ = bisection
def f(x: float) -> float:
    """Demo polynomial x^3 - 2x - 5, whose real root (~2.0946) the demo below brackets.

    NOTE(review): the original's parameter was mangled while the body kept
    ``x``; the name ``f`` is restored from the ``__main__`` call site below.
    """
    return x**3 - 2 * x - 5


# Backward-compatible alias for the original (mangled) function name.
A_ = f
if __name__ == "__main__":
    # Demo: bracket the root of f(x) = x^3 - 2x - 5 between 1 and 1000.
    # NOTE(review): ``bisection`` and ``f`` are not defined under these names
    # in this file as written (both defs above were bound to ``A_``) — confirm
    # the intended definitions are restored before running as a script.
    print(bisection(f, 1, 10_00))
    # Run any embedded doctests as well.
    import doctest
    doctest.testmod()
| 3 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class snake_case :
"""simple docstring"""
@staticmethod
def a__ ( *_lowercase, **_lowercase ) -> Tuple:
pass
@is_pipeline_test
@require_vision
@require_torch
class snake_case(unittest.TestCase):
    """Pipeline tests for zero-shot object detection.

    NOTE(review): in the original every method was named ``a__`` (later defs
    shadowed earlier ones, so unittest discovered nothing) and several
    signatures repeated one parameter name, which is a SyntaxError.  Method
    and parameter names are restored from the pipeline-test mixin
    conventions; all expected-value literals are kept verbatim.
    """

    # Mapping consumed by the pipeline test mixin to enumerate model classes.
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        # A tiny fixed checkpoint keeps this fast; the mixin-provided
        # model/tokenizer/processor arguments are deliberately unused here.
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection')
        examples = [
            {
                'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                'candidate_labels': ['cat', 'remote', 'couch'],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        # Every detection must carry a float score, a str label and an int box.
        self.assertEqual(
            outputs, [
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                }
                for i in range(n)
            ], )

    @require_tf
    @unittest.skip('Zero Shot Object Detection not implemented in TF')
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            'zero-shot-object-detection', model='hf-internal-testing/tiny-random-owlvit-object-detection')
        outputs = object_detector(
            './tests/fixtures/tests_samples/COCO/000000039769.png', candidate_labels=['cat', 'remote', 'couch'], threshold=0.64, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                {'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                {'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                {'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                {'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                {'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                {'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
                {'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
                {'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
            ], )
        # Batched (list-of-dict) input returns a list per example.
        outputs = object_detector(
            [
                {
                    'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                    'candidate_labels': ['cat', 'remote', 'couch'],
                }
            ], threshold=0.64, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'score': 0.7_235, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                    {'score': 0.7_218, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                    {'score': 0.7_184, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
                    {'score': 0.6_748, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                    {'score': 0.6_656, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                    {'score': 0.6_614, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
                    {'score': 0.6_456, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
                    {'score': 0.642, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
                    {'score': 0.6_419, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
                ]
            ], )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                {'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
                {'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
                {'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
            ], )
        outputs = object_detector(
            [
                {
                    'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
                    'candidate_labels': ['cat', 'remote', 'couch'],
                },
                {
                    'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
                    'candidate_labels': ['cat', 'remote', 'couch'],
                },
            ], )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                    {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                    {'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
                    {'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
                    {'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
                ],
                [
                    {'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                    {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                    {'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
                    {'score': 0.1_474, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
                    {'score': 0.1_208, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
                ],
            ], )

    @require_tf
    @unittest.skip('Zero Shot Object Detection not implemented in TF')
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], threshold=threshold, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
                {'score': 0.2_537, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
            ], )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg', candidate_labels=['cat', 'remote', 'couch'], top_k=top_k, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'score': 0.2_868, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
                {'score': 0.277, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
            ], )
| 294 | 0 |
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> float:
_validate_point(UpperCamelCase_ )
_validate_point(UpperCamelCase_ )
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ) ) )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> None:
if point:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
for item in point:
if not isinstance(UpperCamelCase_ , (int, float) ):
UpperCamelCase_ = (
"Expected a list of numbers as input, found "
F'''{type(UpperCamelCase_ ).__name__}'''
)
raise TypeError(UpperCamelCase_ )
else:
UpperCamelCase_ = F'''Expected a list of numbers as input, found {type(UpperCamelCase_ ).__name__}'''
raise TypeError(UpperCamelCase_ )
else:
raise ValueError("Missing an input" )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> float:
_validate_point(UpperCamelCase_ )
_validate_point(UpperCamelCase_ )
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(UpperCamelCase_ , UpperCamelCase_ ) ) )
if __name__ == "__main__":
    # Run any embedded doctests (none are defined in this file as written).
    import doctest
    doctest.testmod()
| 703 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config.json location.
# NOTE(review): the original bound the logger and this map to the same name,
# losing the logger; distinct names restore both, and the original binding is
# kept as an alias to its final value.
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
_UpperCAmelCase = OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP
class _UpperCamelCase(PretrainedConfig):
    """Configuration for the original OpenAI GPT model.

    NOTE(review): the original inherited from an undefined alias and declared
    every ``__init__`` parameter with one shared name (a SyntaxError).  The
    base class is restored from the ``PretrainedConfig`` import above and the
    parameter names from the attribute assignments.
    """

    model_type = "openai-gpt"
    # Canonical attribute names -> the GPT-specific names used below.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 371 | 0 |
snake_case__ : Any = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def lowerCamelCase__ ( _lowerCamelCase ) ->int:
_UpperCAmelCase =0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
snake_case__ : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
snake_case__ : Optional[int] = True
snake_case__ : List[Any] = False
def lowerCamelCase__ ( _lowerCamelCase ) ->bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_UpperCAmelCase =chain(next_number(_lowerCamelCase ) )
_UpperCAmelCase =number_chain
while number < 1000_0000:
_UpperCAmelCase =number_chain
number *= 10
return number_chain
def solution(number: int = 10_000_000) -> int:
    """Count how many starting values below ``number`` produce a square-digit
    chain that arrives at 89 (Project Euler problem 92).

    NOTE(review): the original def reused the same mangled name as the two
    helpers above; the name ``solution`` is restored from the explicit call
    in the ``__main__`` block below.
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    # Chains ending at 89 were memoised as False.
    return CHAINS[:number].count(False)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): requires ``solution`` to be defined at module scope — in
    # the file as written the def above was bound to a mangled name instead.
    print(F"""{solution() = }""")
| 408 |
from __future__ import annotations
from collections.abc import MutableSequence
class _a :
"""simple docstring"""
def __init__( self , _snake_case , _snake_case ):
if len(_snake_case ) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1." )
_UpperCAmelCase =list(_snake_case )
_UpperCAmelCase =degree
def __add__( self , _snake_case ):
if self.degree > polynomial_a.degree:
_UpperCAmelCase =self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , _snake_case )
else:
_UpperCAmelCase =polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , _snake_case )
def __sub__( self , _snake_case ):
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self ):
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self , _snake_case ):
_UpperCAmelCase =[0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , _snake_case )
def SCREAMING_SNAKE_CASE ( self , _snake_case ):
_UpperCAmelCase =0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self ):
_UpperCAmelCase =""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_snake_case )
return polynomial
def __repr__( self ):
return self.__str__()
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =[0] * self.degree
for i in range(self.degree ):
_UpperCAmelCase =self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , _snake_case )
def SCREAMING_SNAKE_CASE ( self , _snake_case = 0 ):
_UpperCAmelCase =[0] * (self.degree + 2)
_UpperCAmelCase =constant
for i in range(self.degree + 1 ):
_UpperCAmelCase =self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , _snake_case )
def __eq__( self , _snake_case ):
if not isinstance(_snake_case , _snake_case ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self , _snake_case ):
return not self.__eq__(_snake_case )
| 408 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Pin all torch/cuDNN nondeterminism so the pixel-exact assertions in the
# tests below are reproducible.
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    """Fast (CPU, tiny-model) tests for ``AudioDiffusionPipeline``.

    NOTE(review): heavily name-mangled in the original — every method shared
    one name, every local was collapsed onto one placeholder and several
    keyword arguments pointed at an undefined name.  Names below are
    reconstructed from the surviving attribute use sites (``self.dummy_unet``,
    ``self.dummy_unet_condition``, ``self.dummy_vqvae_and_unet``) and the
    pipeline's keyword API — confirm against the upstream test file.  All
    expected-value arrays are kept verbatim.
    """

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        # Seed before construction so the tiny model's weights are reproducible.
        torch.manual_seed(0)
        model = UNetaDModel(
            sample_size=(3_2, 6_4), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(1_2_8, 1_2_8), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            sample_size=(6_4, 3_2), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(1_2_8, 1_2_8), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=1_0, )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(1_2_8, 6_4), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(1_2_8, 1_2_8), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
        unet = UNetaDModel(
            sample_size=(6_4, 3_2), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(1_2_8, 1_2_8), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(4_2)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]
        # Re-seed and re-run without return_dict to check tuple output parity.
        generator = torch.Generator(device=device).manual_seed(4_2)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:1_0]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:1_0]
        expected_slice = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0
        # Second stage: VQ-VAE + DDIM, resuming from raw audio at start_step.
        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(4_2)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=1_0)
        image = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:1_0]
        expected_slice = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        # Third stage: conditional UNet driven by an encoding tensor.
        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 1_0))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:1_0]
        expected_slice = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


# Backward-compatible alias for the original (mangled) class name; the GPU
# test class below rebinds the same name in the original module too.
lowerCamelCase__ = PipelineFastTests
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    """Slow, GPU-only integration test against a real pretrained checkpoint.

    NOTE(review): method and local names reconstructed from the pipeline's
    keyword API — the original collapsed them onto mangled placeholders.
    """

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(4_2)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:1_0]
        expected_slice = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


# Backward-compatible alias for the original (mangled) class name.
lowerCamelCase__ = PipelineIntegrationTests
| 710 |
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Build a G(n, p) random graph as an adjacency dict {vertex: [neighbours]}.

    Each pair (i, j) gets an edge with the given probability; when
    ``directed`` is False the edge is recorded in both directions.
    ``probability >= 1`` short-circuits to a complete graph and
    ``probability <= 0`` to an edgeless one.

    NOTE(review): the original shared one mangled name with ``complete_graph``
    below and declared duplicate parameter names (a SyntaxError); names are
    restored from the internal call site and comments.
    """
    graph: dict = {i: [] for i in range(vertices_number)}
    # If probability is greater or equal than 1, generate a complete graph.
    if probability >= 1:
        return complete_graph(vertices_number)
    # If probability is lower or equal than 0, return a graph without edges.
    if probability <= 0:
        return graph
    # For each pair of nodes, add the edge when the random draw falls below
    # the requested probability.
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # Undirected graph: mirror the edge.
                    graph[j].append(i)
    return graph


# Backward-compatible alias for the original (mangled) function name.
lowerCAmelCase_ = random_graph
def complete_graph(vertices_number: int) -> dict:
    """Return the complete graph on ``vertices_number`` vertices as an adjacency dict.

    NOTE(review): name restored from the call in the random-graph builder
    above; in the original both defs shared one mangled name.
    """
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


# Backward-compatible alias: the original (mangled) shared name ended up bound
# to this function.
lowerCAmelCase_ = complete_graph
if __name__ == "__main__":
    # Run any embedded doctests (none are defined in this file as written).
    import doctest
    doctest.testmod()
| 442 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
# NOTE(review): bound but never read below — the primitive-root search uses
# the literal 3 directly; presumably this was meant to be that minimum
# candidate value.  Confirm against the upstream module.
lowercase_ = 3
def primitive_root(p_val: int) -> int:
    """Randomly search for a primitive-root candidate modulo the prime ``p_val``.

    Candidates g with g^2 ≡ 1 or g^p ≡ 1 (mod p) are rejected, matching the
    original screening; termination is probabilistic.

    NOTE(review): the original's four defs in this file all shared the name
    ``a`` and this body referenced an undefined placeholder; the name is
    restored from the call site in the key-generation function below.
    """
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


# Backward-compatible alias for the original (mangled) function name.
a = primitive_root
def generate_key(key_size: int) -> tuple:
    """Generate an ElGamal key pair of the requested bit size.

    Returns ``((key_size, e_1, e_2, p), (key_size, d))`` — the public and
    private keys — using the project's ``rabin_miller`` prime generator and
    ``cryptomath.find_mod_inverse``.

    NOTE(review): locals reconstructed from the surviving expressions; the
    name ``generate_key`` is restored from the call in ``make_key_files``.
    """
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


# Backward-compatible alias for the original (mangled) function name.
a = generate_key
def make_key_files(name: str, key_size: int) -> None:
    """Generate a key pair and write it to ``{name}_pubkey.txt`` /
    ``{name}_privkey.txt``, refusing to overwrite existing key files.

    NOTE(review): the original declared both parameters with one shared name
    (a SyntaxError); names restored from the f-strings in the body and the
    call in ``main`` below.
    """
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print('\nWARNING:')
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            'Use a different name or delete these files and re-run this program.')
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', 'w') as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', 'w') as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')


# Backward-compatible alias for the original (mangled) function name.
a = make_key_files
def main() -> None:
    """Entry point: write a 2048-bit ElGamal key pair named 'elgamal'."""
    print('Making key files...')
    make_key_files('elgamal', 2048)
    print('Key files generation successful')


# Backward-compatible alias: the original's final binding of the shared
# (mangled) name ``a`` was this function.
a = main
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined under that name in this file as
    # written (all four defs above were bound to ``a``) — confirm the intended
    # entry point is restored before running as a script.
    main()
| 291 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class snake_case:
    """Helper that builds tiny LayoutLM configs/inputs and exercises each TF head.

    NOTE(review): reconstructed from a heavily name-mangled original — the
    ``__init__`` repeated one parameter name for every argument (a
    SyntaxError), all methods shared one name and assignment targets were
    replaced by a placeholder.  Parameter and method names are restored from
    the surviving right-hand sides and call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # Convert bbox to numpy since TF does not support item assignment.
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that each bbox is legal (min corner <= max corner).
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = LayoutLMConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMModel(config=config)
        # Exercise the three accepted calling conventions; keep the last result.
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class snake_case ( lowercase , lowercase , unittest.TestCase ):
    """Common config/model tests for the TF LayoutLM family, driven by the shared test mixins."""

    # Attributes read by the shared test mixins. The placeholders all shared one
    # name and shadowed each other; restored to the names the mixins look up.
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        """Create the model tester and config tester used by every test below."""
        # NOTE(review): the tester/config class names must match the definitions
        # imported or declared earlier in this file — confirm against the header.
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
    """Return a fixed batch of 2 tokenized sequences (ids, attention mask, boxes, segment ids, labels)
    used by the LayoutLM integration tests."""
    # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on:
    # fmt: off
    input_ids = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] )  # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] )  # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] )  # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] )  # noqa: E231
    # these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] )  # noqa: E231
    # fmt: on
    return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class snake_case ( unittest.TestCase ):
    """Slow integration tests against the pretrained microsoft/layoutlm-base-uncased checkpoint."""

    @slow
    def test_forward_pass_no_head(self):
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))
        # test the pooled output on [1, :3]
        expected_pooled = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_pooled, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )
        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 675 | 0 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2 in row-major order.

    A size of 0 falls back to 4; negative sizes use their absolute value.
    """
    size = abs(row_size) or 4
    return [[1 + x + y * size for x in range(size)] for y in range(size)]
def rotate_aa(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise.

    Equivalent to reverse_row(transpose(matrix)) — inlined so the function is
    self-contained and does not depend on helper names elsewhere in the module.
    """
    return [list(row) for row in zip(*matrix)][::-1]
def rotate_aaa(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180 degrees.

    Equivalent to reverse_row(reverse_column(matrix)) — inlined so the function
    is self-contained.
    """
    return [row[::-1] for row in matrix][::-1]
def lowerCAmelCase(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise (90 clockwise).

    Equivalent to reverse_column(transpose(matrix)) — inlined so the function
    is self-contained. NOTE(review): the demo at the bottom of this module
    appears to expect this function under a distinct rotate-270 name.
    """
    return [list(row)[::-1] for row in zip(*matrix)]
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return the transpose of the matrix (rows become columns)."""
    return [list(column) for column in zip(*matrix)]
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return a copy of the matrix with its rows in reverse order."""
    return matrix[::-1]
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return a copy of the matrix with each row's elements reversed."""
    return [row[::-1] for row in matrix]
def print_matrix(matrix: list[list[int]]) -> None:
    """Print each matrix row on its own line, space-separated."""
    for row in matrix:
        print(*row)
if __name__ == "__main__":
    # Demo: print a fresh matrix and its 90/180/270-degree rotations.
    # NOTE(review): the assignments bind `_lowerCAmelCase` but the calls below
    # read `matrix`, and `make_matrix`/`print_matrix`/`rotate_aa`/`rotate_aaa`
    # must match the function names defined above — confirm the names line up.
    _lowerCAmelCase : Optional[int] = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_aa(matrix))
    _lowerCAmelCase : Union[str, Any] = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_aaa(matrix))
    _lowerCAmelCase : Optional[int] = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    # NOTE(review): `rotate_aaa` is reused here although the banner says 270 —
    # presumably a distinct rotate-270 function was intended.
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_aaa(matrix))
| 364 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if the partition number is 'perfect'.

    For a pronic number n = k*(k+1), sqrt(4n + 1) = 2k + 1, so the expression
    below reduces to log2(k + 1); the partition is perfect when k + 1 is a
    power of two (i.e. the logarithm is integral).
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the first partition number at which the proportion of perfect
    partitions drops below ``max_proportion``.

    Candidates are n = (i**2 - 1) / 4 for integer i (integral exactly when i
    is odd, giving the pronic numbers k*(k+1)).
    """

    def _is_perfect(candidate: int) -> bool:
        # n = k*(k+1) is perfect when k + 1 is a power of two; the expression
        # simplifies to log2(k + 1). (Inlined so this function is self-contained.)
        exponent = math.log2(math.sqrt(4 * candidate + 1) / 2 + 1 / 2)
        return exponent == int(exponent)

    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if _is_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
            return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
    # NOTE(review): `solution` must match the name of the solver defined above.
    print(F'''{solution() = }''')
| 364 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: maps each submodule to the public names it provides.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available, so the modeling classes can be exposed as well
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 284 |
'''simple docstring'''
def snake_case ( snake_case : int , snake_case : int , snake_case : int ) -> int:
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
lowerCAmelCase = _modexpt(snake_case , exponent // 2 , snake_case ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(snake_case , exponent - 1 , snake_case )) % modulo_value
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last ``digits`` digits of the tetration base↑↑height
    (Project Euler 188).

    The power tower is built iteratively, reducing modulo 10**digits at every
    step with the built-in three-argument ``pow``.
    """
    result = base
    for _ in range(1, height):
        result = pow(base, result, 10**digits)
    return result
if __name__ == "__main__":
    # NOTE(review): `solution` must match the name of the solver defined above.
    print(F"""{solution() = }""")
| 284 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] =logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] ={
'''uw-madison/mra-base-512-4''': '''https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json''',
}
class __A ( a ):
    """Configuration for the MRA model: stores the hyper-parameters consumed by
    the MRA architecture and forwards the special token ids to the base config."""

    # Keep the original class attribute and also expose the standard
    # Hugging Face `model_type` key (backward-compatible addition).
    __A = """mra"""
    model_type = """mra"""

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # store hyper-parameters on the instance (the base config serializes them)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 269 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : Tuple =logging.get_logger(__name__)
UpperCAmelCase__ : str =['''model.decoder.embed_positions.weights''']
def rename_keys(name):
    """Map a fairseq MusicGen decoder weight name onto its HF Transformers equivalent."""
    if "emb" in name:
        name = name.replace("""emb""", """model.decoder.embed_tokens""")
    if "transformer" in name:
        name = name.replace("""transformer""", """model.decoder""")
    if "cross_attention" in name:
        name = name.replace("""cross_attention""", """encoder_attn""")
    if "linear1" in name:
        name = name.replace("""linear1""", """fc1""")
    if "linear2" in name:
        name = name.replace("""linear2""", """fc2""")
    if "norm1" in name:
        name = name.replace("""norm1""", """self_attn_layer_norm""")
    if "norm_cross" in name:
        name = name.replace("""norm_cross""", """encoder_attn_layer_norm""")
    if "norm2" in name:
        name = name.replace("""norm2""", """final_layer_norm""")
    if "out_norm" in name:
        name = name.replace("""out_norm""", """model.decoder.layer_norm""")
    if "linears" in name:
        name = name.replace("""linears""", """lm_heads""")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("""condition_provider.conditioners.description.output_proj""", """enc_to_dec_proj""")
    return name
def rename_state_dict(state_dict, hidden_size) -> Tuple[Dict, Dict]:
    """Rename fairseq keys to HF names, split fused qkv projections, and pull the
    enc-dec projection weights out into their own state dict.

    Returns (decoder_state_dict, enc_dec_proj_state_dict); the input dict is
    mutated in place.
    """

    def _rename(name):
        # key renaming inlined here so this function is self-contained
        replacements = [
            ("emb", "model.decoder.embed_tokens"),
            ("transformer", "model.decoder"),
            ("cross_attention", "encoder_attn"),
            ("linear1", "fc1"),
            ("linear2", "fc2"),
            ("norm1", "self_attn_layer_norm"),
            ("norm_cross", "encoder_attn_layer_norm"),
            ("norm2", "final_layer_norm"),
            ("out_norm", "model.decoder.layer_norm"),
            ("linears", "lm_heads"),
            ("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj"),
        ]
        for old, new in replacements:
            if old in name:
                name = name.replace(old, new)
        return name

    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = _rename(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint) -> MusicgenDecoderConfig:
    """Return the MusicgenDecoderConfig matching a pretrained checkpoint size.

    Raises ValueError for anything other than 'small', 'medium' or 'large'.
    """
    if checkpoint == "small":
        # default config values
        hidden_size = 10_24
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 15_36
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 20_48
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """Convert a fairseq MusicGen checkpoint into a MusicgenForConditionalGeneration
    model (+ processor), optionally saving it to disk and/or pushing it to the Hub.

    NOTE(review): relies on the helper names `decoder_config_from_checkpoint` and
    `rename_state_dict` defined earlier in this file — confirm the names line up.
    """
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("""t5-base""")
    audio_encoder = EncodecModel.from_pretrained("""facebook/encodec_32khz""")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    # fairseq keeps sinusoidal position weights that HF recomputes on the fly
    expected_missing_keys = ["model.decoder.embed_positions.weights"]
    for key in missing_keys.copy():
        if key.startswith(("""text_encoder""", """audio_encoder""")) or key in expected_missing_keys:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""")

    if len(unexpected_keys) > 0:
        raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 20_48):
        raise ValueError("""Incorrect shape for logits""")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("""t5-base""")
    feature_extractor = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""", padding_side="""left""")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 20_48
    model.generation_config.pad_token_id = 20_48

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(F"""Pushing model {checkpoint} to {repo_id}""")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint size / output dir / hub repo / device
    # and run the conversion.
    # NOTE(review): `convert_musicgen_checkpoint` must match the name of the
    # conversion function defined above.
    UpperCAmelCase__ : List[Any] =argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint''',
        default='''small''',
        type=str,
        help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder''',
        required=True,
        default=None,
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )
    parser.add_argument(
        '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
    )
    # NOTE(review): the parser is assigned to `UpperCAmelCase__` above but read as
    # `parser`/`args` below — the binding names look mismatched.
    UpperCAmelCase__ : Optional[Any] =parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 269 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''Hello, World!'''
lowerCAmelCase__ = '''en_XX'''
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    '''
    Convert a fairseq X-MOD checkpoint into a Hugging Face Xmod model.

    Loads the fairseq model, builds an equivalent XmodConfig, copies every
    weight across, verifies both models agree on a sample sentence, then saves
    the converted model to ``pytorch_dump_folder_path``.
    '''
    data_dir = Path('''data_bin''')
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent),
        checkpoint_file=Path(xmod_checkpoint_path).name,
        _name='''xmod_base''',
        arch='''xmod_base''',
        task='''multilingual_masked_lm''',
        data_name_or_path=str(data_dir),
        bpe='''sentencepiece''',
        sentencepiece_model=str(Path(xmod_checkpoint_path).parent / '''sentencepiece.bpe.model'''),
        src_dict=str(data_dir / '''dict.txt'''),
    )
    xmod.eval()  # disable dropout
    print(xmod)

    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=xmod.cfg.model.encoder_embed_dim,
        num_hidden_layers=xmod.cfg.model.encoder_layers,
        num_attention_heads=xmod.cfg.model.encoder_attention_heads,
        intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=5_14,
        type_vocab_size=1,
        layer_norm_eps=1E-5,
        pre_norm=xmod.cfg.model.encoder_normalize_before,
        adapter_reduction_factor=getattr(xmod.cfg.model, '''bottleneck''', 2),
        adapter_layer_norm=xmod.cfg.model.adapter_layer_norm,
        adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm,
        ln_before_adapter=xmod.cfg.model.ln_before_adapter,
        languages=xmod.cfg.model.languages,
    )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''', config)

    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]

        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''')

        self_attn.query.weight = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias = xmod_layer.self_attn.v_proj.bias

        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''')
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias

        # intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''')
        intermediate.dense.weight = xmod_layer.fca.weight
        intermediate.dense.bias = xmod_layer.fca.bias

        # output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''')
        bert_output.dense.weight = xmod_layer.fca.weight
        bert_output.dense.bias = xmod_layer.fca.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias

        if sorted(bert_output.adapter_modules.keys()) != sorted(xmod_layer.adapter_modules.keys()):
            raise AssertionError('''Lists of language adapters do not match.''')
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            # NOTE(review): the fairseq adapters expose two fc layers whose exact
            # attribute names on the HF side must match XmodOutput's adapters.
            to_adapter.fca.weight = from_adapter.fca.weight
            to_adapter.fca.bias = from_adapter.fca.bias
            to_adapter.fca.weight = from_adapter.fca.weight
            to_adapter.fca.bias = from_adapter.fca.bias
        # end of layer

    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias

    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads['''mnli'''].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads['''mnli'''].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads['''mnli'''].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads['''mnli'''](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''')  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1E-3)
    print('''Do both models output the same tensors?''', '''🔥''' if success else '''💩''')
    if not success:
        raise Exception('''Something went wRoNg''')

    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(F'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse checkpoint path, output dir, and the optional
    # classification-head flag, then run the conversion.
    # NOTE(review): `convert_xmod_checkpoint_to_pytorch` must match the name of
    # the conversion function defined above, and the parser is assigned to
    # `lowerCAmelCase__` but read as `parser`/`args` below — confirm the names.
    lowerCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    lowerCAmelCase__ = parser.parse_args()
    convert_xmod_checkpoint_to_pytorch(
        args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 83 |
"""simple docstring"""
def solution(a_ :int = 100) -> int:
    """Count the distinct values of a**b for 2 <= a <= a_ and 2 <= b <= a_
    (Project Euler 29)."""
    collect_powers = set()
    limit = a_ + 1  # maximum limit (exclusive bound for range)
    for a in range(2, limit):
        for b in range(2, limit):
            collect_powers.add(a**b)  # a set keeps only distinct powers
    return len(collect_powers)
if __name__ == "__main__":
    # NOTE(review): `solution` must match the name of the solver defined above.
    print('''Number of terms ''', solution(int(str(input()).strip())))
| 677 | 0 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an (H, W, 3) RGB image to grayscale using ITU-R 601-2 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b
def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image to a boolean mask (True for values in (127, 255])."""
    return (gray > 1_2_7) & (gray <= 2_5_5)
def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element.

    A pixel becomes 1 if any kernel-weighted neighbour in the (zero-)padded
    image is non-zero.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image (centred for an odd-sized kernel)
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output
if __name__ == "__main__":
    # Demo: load lena.jpg, binarize it, dilate with a 3x3 cross, and save the result.
    # NOTE(review): the assignments bind `lowerCAmelCase` but later lines read
    # `lena_path`/`lena`/`structuring_element`/`output`/`pil_img` — confirm the
    # binding names line up with the function names defined above.
    # read original image
    lowerCAmelCase = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
    lowerCAmelCase = np.array(Image.open(lena_path))
    # kernel to be applied
    lowerCAmelCase = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    lowerCAmelCase = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    lowerCAmelCase = Image.fromarray(output).convert("""RGB""")
    pil_img.save("""result_dilation.png""")
| 551 |
'''simple docstring'''
def __A ( a_ : int ):
if not isinstance(a_ ,a_ ):
lowerCAmelCase : Dict = f'''Input value of [number={number}] must be an integer'''
raise TypeError(a_ )
if number < 0:
return False
lowerCAmelCase : Dict = number * number
while number > 0:
if number % 1_0 != number_square % 1_0:
return False
number //= 1_0
number_square //= 1_0
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 551 | 1 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A__ :
    """Builds tiny DeiT configs/inputs and checks model output shapes
    (the DeiT model-tester helper used by the test case below).

    NOTE(review): obfuscation collapsed every method parameter to the single
    name `_a` (``def __init__(self, _a, _a=13, ...)`` is a duplicate-argument
    SyntaxError) and bound each attribute to the local `_SCREAMING_SNAKE_CASE`
    instead of ``self.<name>``, while method bodies still read ``self.<name>``.
    The intended parameter names are visible on the assignment right-hand
    sides; they must be restored for this class to run.  Code left
    byte-identical for review.
    """

    def __init__( self : List[str] , _a : str , _a : List[str]=13 , _a : Tuple=30 , _a : Any=2 , _a : str=3 , _a : Tuple=True , _a : int=True , _a : List[str]=32 , _a : int=5 , _a : List[str]=4 , _a : Union[str, Any]=37 , _a : Optional[int]="gelu" , _a : str=0.1 , _a : Optional[int]=0.1 , _a : Any=10 , _a : Dict=0.02 , _a : List[Any]=3 , _a : Union[str, Any]=None , _a : List[Any]=2 , ) -> str:
        """Store the tiny-model hyperparameters and derive the sequence length."""
        _SCREAMING_SNAKE_CASE =parent
        _SCREAMING_SNAKE_CASE =batch_size
        _SCREAMING_SNAKE_CASE =image_size
        _SCREAMING_SNAKE_CASE =patch_size
        _SCREAMING_SNAKE_CASE =num_channels
        _SCREAMING_SNAKE_CASE =is_training
        _SCREAMING_SNAKE_CASE =use_labels
        _SCREAMING_SNAKE_CASE =hidden_size
        _SCREAMING_SNAKE_CASE =num_hidden_layers
        _SCREAMING_SNAKE_CASE =num_attention_heads
        _SCREAMING_SNAKE_CASE =intermediate_size
        _SCREAMING_SNAKE_CASE =hidden_act
        _SCREAMING_SNAKE_CASE =hidden_dropout_prob
        _SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
        _SCREAMING_SNAKE_CASE =type_sequence_label_size
        _SCREAMING_SNAKE_CASE =initializer_range
        _SCREAMING_SNAKE_CASE =scope
        _SCREAMING_SNAKE_CASE =encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        _SCREAMING_SNAKE_CASE =(image_size // patch_size) ** 2
        _SCREAMING_SNAKE_CASE =num_patches + 2

    def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
        """Create random pixel values (and labels when `use_labels`) plus a config."""
        _SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _SCREAMING_SNAKE_CASE =None
        if self.use_labels:
            _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
        _SCREAMING_SNAKE_CASE =self.get_config()
        return config, pixel_values, labels

    def __UpperCamelCase ( self : str ) -> Dict:
        """Build a DeiTConfig from the stored hyperparameters."""
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def __UpperCamelCase ( self : Optional[Any] , _a : List[Any] , _a : Optional[int] , _a : Any ) -> int:
        """Check DeiTModel's last_hidden_state shape."""
        _SCREAMING_SNAKE_CASE =DeiTModel(config=_a )
        model.to(_a )
        model.eval()
        _SCREAMING_SNAKE_CASE =model(_a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __UpperCamelCase ( self : str , _a : List[Any] , _a : int , _a : List[str] ) -> Union[str, Any]:
        """Check DeiTForMaskedImageModeling reconstruction shapes, incl. greyscale."""
        _SCREAMING_SNAKE_CASE =DeiTForMaskedImageModeling(config=_a )
        model.to(_a )
        model.eval()
        _SCREAMING_SNAKE_CASE =model(_a )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        _SCREAMING_SNAKE_CASE =1
        _SCREAMING_SNAKE_CASE =DeiTForMaskedImageModeling(_a )
        model.to(_a )
        model.eval()
        _SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        _SCREAMING_SNAKE_CASE =model(_a )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def __UpperCamelCase ( self : Optional[Any] , _a : List[Any] , _a : int , _a : str ) -> List[str]:
        """Check DeiTForImageClassification logits shape, incl. greyscale."""
        _SCREAMING_SNAKE_CASE =self.type_sequence_label_size
        _SCREAMING_SNAKE_CASE =DeiTForImageClassification(_a )
        model.to(_a )
        model.eval()
        _SCREAMING_SNAKE_CASE =model(_a , labels=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        _SCREAMING_SNAKE_CASE =1
        _SCREAMING_SNAKE_CASE =DeiTForImageClassification(_a )
        model.to(_a )
        model.eval()
        _SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        _SCREAMING_SNAKE_CASE =model(_a , labels=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def __UpperCamelCase ( self : Optional[int] ) -> Any:
        """Split prepare_config_and_inputs() into (config, inputs_dict) for common tests."""
        _SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()

        (
            (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) , (
                _SCREAMING_SNAKE_CASE
            ) ,
        ) =config_and_inputs
        _SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    """Unit tests for the DeiT model family (model, masked-image-modeling,
    image classification, gradient checkpointing, problem types).

    NOTE(review): obfuscation damage — the two mixin base classes are both
    named `UpperCamelCase__` (undefined here; the imports suggest
    ModelTesterMixin / PipelineTesterMixin); the five class attributes all
    share the name `UpperCAmelCase`, so only the last assignment survives;
    method parameters are collapsed to the single name `_a` (a
    duplicate-argument SyntaxError where a method takes several); locals are
    bound to `_SCREAMING_SNAKE_CASE` while later lines read the original
    names.  Code left byte-identical for review.
    """

    UpperCAmelCase = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    UpperCAmelCase = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    UpperCAmelCase = False
    UpperCAmelCase = False
    UpperCAmelCase = False

    def __UpperCamelCase ( self : Any ) -> Tuple:
        """Create the shared model tester and a ConfigTester."""
        _SCREAMING_SNAKE_CASE =DeiTModelTester(self )
        _SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )

    def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
        """Run the common config tests."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''DeiT does not use inputs_embeds''' )
    def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
        """Skipped: DeiT has no inputs_embeds."""
        pass

    def __UpperCamelCase ( self : Dict ) -> List[Any]:
        """Check input/output embedding accessors on every model class."""
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _SCREAMING_SNAKE_CASE =model_class(_a )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            _SCREAMING_SNAKE_CASE =model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_a , nn.Linear ) )

    def __UpperCamelCase ( self : Tuple ) -> str:
        """Check the forward signature starts with `pixel_values`."""
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            _SCREAMING_SNAKE_CASE =model_class(_a )
            _SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]

            _SCREAMING_SNAKE_CASE =['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _a )

    def __UpperCamelCase ( self : List[str] ) -> Dict:
        """Exercise create_and_check_model with prepared inputs."""
        _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )

    def __UpperCamelCase ( self : Any ) -> Any:
        """Exercise masked-image-modeling checks."""
        _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_a )

    def __UpperCamelCase ( self : int ) -> List[Any]:
        """Exercise image-classification checks."""
        _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_a )

    def __UpperCamelCase ( self : Union[str, Any] , _a : int , _a : Optional[Any] , _a : List[Any]=False ) -> Tuple:
        """Prepare inputs, dropping labels for the inference-only teacher model."""
        _SCREAMING_SNAKE_CASE =super()._prepare_for_class(_a , _a , return_labels=_a )

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        """Train one step on every trainable model class and backprop the loss."""
        if not self.model_tester.is_training:
            return

        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
        _SCREAMING_SNAKE_CASE =True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(_a )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            _SCREAMING_SNAKE_CASE =model_class(_a )
            model.to(_a )
            model.train()
            _SCREAMING_SNAKE_CASE =self._prepare_for_class(_a , _a , return_labels=_a )
            _SCREAMING_SNAKE_CASE =model(**_a ).loss
            loss.backward()

    def __UpperCamelCase ( self : List[str] ) -> str:
        """Same as above but with gradient checkpointing enabled."""
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        _SCREAMING_SNAKE_CASE =False
        _SCREAMING_SNAKE_CASE =True

        for model_class in self.all_model_classes:
            if model_class in get_values(_a ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            _SCREAMING_SNAKE_CASE =model_class(_a )
            model.gradient_checkpointing_enable()
            model.to(_a )
            model.train()
            _SCREAMING_SNAKE_CASE =self._prepare_for_class(_a , _a , return_labels=_a )
            _SCREAMING_SNAKE_CASE =model(**_a ).loss
            loss.backward()

    def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
        """Check loss computation under each classification problem_type."""
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()

        _SCREAMING_SNAKE_CASE =[
            {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
            {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
            {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(_a ),
                    *get_values(_a ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ):
                    _SCREAMING_SNAKE_CASE =problem_type['''title''']
                    _SCREAMING_SNAKE_CASE =problem_type['''num_labels''']

                    _SCREAMING_SNAKE_CASE =model_class(_a )
                    model.to(_a )
                    model.train()

                    _SCREAMING_SNAKE_CASE =self._prepare_for_class(_a , _a , return_labels=_a )

                    if problem_type["num_labels"] > 1:
                        _SCREAMING_SNAKE_CASE =inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )

                    _SCREAMING_SNAKE_CASE =inputs['''labels'''].to(problem_type['''dtype'''] )

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=_a ) as warning_list:
                        _SCREAMING_SNAKE_CASE =model(**_a ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}" )

                    loss.backward()

    @slow
    def __UpperCamelCase ( self : int ) -> Any:
        """Smoke-test loading a pretrained DeiT checkpoint."""
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _SCREAMING_SNAKE_CASE =DeiTModel.from_pretrained(_a )
            self.assertIsNotNone(_a )
def lowerCamelCase( ):
    """Load the COCO cats fixture image used by the integration tests below.

    Fix: the obfuscated original bound the opened image to
    `_SCREAMING_SNAKE_CASE` and then returned the undefined name `image`
    (NameError); the image is now returned directly.
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
    """Slow integration tests against the pretrained
    facebook/deit-base-distilled-patch16-224 checkpoint.

    NOTE(review): obfuscation damage — all three methods share the name
    `__UpperCamelCase`, so earlier definitions are shadowed; bodies reference
    `_a`, `image_processor`, `image`, `inputs` and `outputs`, none of which
    are defined (results are bound to `_SCREAMING_SNAKE_CASE`).  As written
    this class cannot run; left byte-identical for review.
    """

    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> Any:
        """Default DeiT image processor, or None when vision deps are absent."""
        return (
            DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
            if is_vision_available()
            else None
        )

    @slow
    def __UpperCamelCase ( self : Any ) -> int:
        """Forward a fixture image and verify logits shape and first values."""
        _SCREAMING_SNAKE_CASE =DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
            _a )

        _SCREAMING_SNAKE_CASE =self.default_image_processor
        _SCREAMING_SNAKE_CASE =prepare_img()
        _SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )

        # forward pass
        with torch.no_grad():
            _SCREAMING_SNAKE_CASE =model(**_a )

        # verify the logits
        _SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , _a )

        _SCREAMING_SNAKE_CASE =torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(_a )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
        """Check fp16 inference works with accelerate device_map."""
        _SCREAMING_SNAKE_CASE =DeiTModel.from_pretrained(
            '''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' )
        _SCREAMING_SNAKE_CASE =self.default_image_processor

        _SCREAMING_SNAKE_CASE =prepare_img()
        _SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' )
        _SCREAMING_SNAKE_CASE =inputs.pixel_values.to(_a )

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _SCREAMING_SNAKE_CASE =model(_a )
import requests
from bsa import BeautifulSoup
def lowerCamelCase( a__ = "https://www.worldometers.info/coronavirus"):
    """Scrape the worldometers coronavirus page and return a mapping of
    statistic label -> value (both stripped strings).

    Fix: the obfuscated original bound every intermediate result to
    `_SCREAMING_SNAKE_CASE` while later lines read `soup`, `keys` and
    `values` (NameError); the intended local names are restored.
    """
    soup = BeautifulSoup(requests.get(a__).text ,'''html.parser''')
    keys = soup.findAll('''h1''')
    values = soup.findAll('''div''' ,{'''class''': '''maincounter-number'''})
    keys += soup.findAll('''span''' ,{'''class''': '''panel-title'''})
    values += soup.findAll('''div''' ,{'''class''': '''number-table-main'''})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys ,values)}


if __name__ == "__main__":
    print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    # Fix: the guard previously called the undefined `world_covidaa_stats`.
    for key, value in lowerCamelCase().items():
        print(f"""{key}\n{value}\n""")
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__ ( UpperCamelCase_ , unittest.TestCase ):
    """Tokenizer tests for BlenderbotSmall (vocab/merges round-trip, special
    tokens, decode behaviour against the facebook/blenderbot-90M checkpoint).

    NOTE(review): obfuscation damage — the mixin base `UpperCamelCase_` is
    undefined (the import above suggests TokenizerTesterMixin); both class
    attributes share the name `_snake_case`, so the first is shadowed; all
    test methods share the name `snake_case`, so only the last survives;
    locals are bound to `__lowerCAmelCase` while later expressions read `__a`
    (NameError).  Left byte-identical for review.
    """

    _snake_case = BlenderbotSmallTokenizer
    _snake_case = False

    def snake_case ( self ) -> List[str]:
        """Write a tiny vocab/merges pair into the temp dir."""
        super().setUp()

        __lowerCAmelCase : List[Any] = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        __lowerCAmelCase : Optional[Any] = dict(zip(__a , range(len(__a ) ) ) )

        __lowerCAmelCase : List[str] = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        __lowerCAmelCase : Optional[int] = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}

        __lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __lowerCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(__a ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(__a ) )

    def snake_case ( self , **SCREAMING_SNAKE_CASE ) -> Tuple:
        """Build a tokenizer from the temp dir with the special-tokens map."""
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self , SCREAMING_SNAKE_CASE ) -> int:
        """Provide an input/output text pair for the common round-trip tests."""
        __lowerCAmelCase : Optional[int] = 'adapt act apte'
        __lowerCAmelCase : str = 'adapt act apte'
        return input_text, output_text

    def snake_case ( self ) -> Union[str, Any]:
        """Check tokenization and token->id conversion on the tiny vocab."""
        __lowerCAmelCase : List[Any] = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __lowerCAmelCase : Dict = 'adapt act apte'
        __lowerCAmelCase : List[str] = ['adapt', 'act', 'ap@@', 'te']
        __lowerCAmelCase : int = tokenizer.tokenize(__a )
        self.assertListEqual(__a , __a )

        __lowerCAmelCase : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        __lowerCAmelCase : str = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )

    def snake_case ( self ) -> Tuple:
        """Check encode/decode against the pretrained 90M checkpoint."""
        __lowerCAmelCase : Dict = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        assert tok('sam' ).input_ids == [13_84]
        __lowerCAmelCase : int = 'I am a small frog.'
        __lowerCAmelCase : Optional[Any] = tok([src_text] , padding=__a , truncation=__a )['input_ids']
        __lowerCAmelCase : int = tok.batch_decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def snake_case ( self ) -> Dict:
        """Check that a trailing period encodes to the same id as bare '.'."""
        __lowerCAmelCase : Any = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        __lowerCAmelCase : str = 'I am a small frog .'
        __lowerCAmelCase : Tuple = '.'
        __lowerCAmelCase : int = tok(__a )['input_ids']
        __lowerCAmelCase : Optional[Any] = tok(__a )['input_ids']

        assert encoded[-1] == encoded_dot[0]
| 717 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
# Type aliases used by the distance helpers below.  Fix: both aliases were
# bound to the same name `A_` (the second clobbered the first) while the
# function annotations read `Vector`/`VectorOut`; `np.floataa` (an
# obfuscated `np.float64`) does not exist.
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1, vector_2):
    """Return the Euclidean (L2) distance between two equal-length vectors,
    computed with NumPy.  Accepts any iterable of numbers or an ndarray.

    Renamed from the obfuscated `A` (both distance functions shared that name,
    shadowing each other); the benchmark in the `__main__` block times
    `euclidean_distance`.

    >>> float(euclidean_distance([1, 2, 3], [4, 5, 6]))
    5.196152422706632
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1, vector_2):
    """Pure-Python Euclidean (L2) distance between two equal-length iterables
    (no NumPy) — the benchmarking counterpart of `euclidean_distance`.

    Renamed from the obfuscated `A`, which collided with the NumPy version.
    """
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":

    def benchmark() -> None:
        """Time the pure-Python and NumPy implementations against each other.

        Fix: the inner function was obfuscated to `A` while the guard calls
        `benchmark()` (NameError).
        """
        from timeit import timeit

        print('Without Numpy' )
        print(
            timeit(
                'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_0_0_0_0 , globals=globals() , ) )
        print('With Numpy' )
        print(
            timeit(
                'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_0_0_0_0 , globals=globals() , ) )

    benchmark()
| 123 | 0 |
'''simple docstring'''
def lowerCAmelCase_ ( ):
    """Project Euler 48: return the last ten digits of the series
    1**1 + 2**2 + ... + 1000**1000, as a string.

    Fix: the obfuscated original accumulated into `total` but initialised a
    differently-named local, and returned `str(_lowerCamelCase)` — both
    undefined names.
    """
    total = 0
    for i in range(1 , 10_01 ):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    # Fix: the guard previously called the undefined name `solution`.
    print(lowerCAmelCase_())
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both module constants were bound to the same obfuscated name
# `UpperCamelCase__`, so the archive map clobbered the logger.  Names restored
# per the upstream transformers convention.
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/unispeech-large-1500h-cv''': (
        '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'''
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _UpperCamelCase (PretrainedConfig):
    """Configuration class for a UniSpeech model.

    NOTE(review): the obfuscated original collapsed every ``__init__``
    parameter to the single name ``lowerCAmelCase__`` (a duplicate-argument
    SyntaxError) and bound each attribute to a local instead of ``self``;
    parameter and attribute names below are restored from the assignment
    right-hand sides, matching the upstream ``transformers.UniSpeechConfig``.
    The base class is restored from the module's ``PretrainedConfig`` import.
    """

    model_type = '''unispeech'''

    def __init__(
        self,
        vocab_size=3_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(1_0, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=1_2_8,
        num_conv_pos_embedding_groups=1_6,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=1_0,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=1_0,
        mask_feature_min_masks=0,
        num_codevectors_per_group=3_2_0,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=1_0_0,
        codevector_dim=2_5_6,
        proj_codevector_dim=2_5_6,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=2_5_6,
        num_ctc_classes=8_0,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three convolutional spec tuples must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        """Product of the conv strides: input frames consumed per output frame."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_lowercase = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
_lowercase = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
_lowercase = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
_lowercase = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
_lowercase = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
    """HumanEval pass@k metric: executes candidate programs against their test
    cases in a thread pool and aggregates pass rates.

    NOTE(review): the obfuscated original collapsed both method names and all
    `_compute` parameters to `_lowercase` (a duplicate-argument SyntaxError)
    and left locals unbound; names restored from the upstream `code_eval`
    metric implementation.
    """

    def _info(self):
        """Declare the metric's features, citation and license for `datasets`."""
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
                    """references""": datasets.Value("""string""" ),
                } ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Run every candidate against its test case and compute pass@k.

        `k` keeps its upstream mutable default; it is only iterated, never
        mutated.  Raises ValueError unless HF_ALLOW_CODE_EVAL="1" is set,
        because candidate code is executed.
        """
        if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
            raise ValueError(_WARNING )

        if os.name == "nt":
            raise NotImplementedError("""This metric is currently not supported on Windows.""" )

        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()  # per-task counter so each candidate gets a unique id
            n_samples = 0
            results = defaultdict(list )

            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + """\n""" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result["""completion_id"""], result) )

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["""passed"""] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )

        ks = k
        pass_at_k = {F'pass@{k}': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimate pass@k for each problem and return the values as an array.

    Args:
        num_samples: number of candidates per problem (an int applied to all
            problems, or a per-problem sequence).
        num_correct: per-problem count of passing candidates.
        k: the k of pass@k.

    Renamed from the obfuscated `A`, whose three parameters all shared the
    name `__lowerCamelCase` (a duplicate-argument SyntaxError); the metric
    class above calls `estimate_pass_at_k`.
    """

    def estimator(n: int, c: int, k: int) -> float:
        """Unbiased pass@k estimator: 1 - C(n-c, k) / C(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )

    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
| 162 |
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
    """Configuration holder that parametrizes the ImageGPT image-processor tests.

    Fixes the original block, in which every ``__init__`` parameter was named
    ``_lowercase`` (duplicate argument names are a SyntaxError) and every
    ``self.<attr>`` assignment target was collapsed to a throwaway local.  The
    intended names are pinned down by the reads in the second method
    (``self.do_resize`` / ``self.size`` / ``self.do_normalize``) and by the
    positional order of the body's right-hand sides.
    """

    def __init__(
        self ,
        parent ,
        batch_size=7 ,
        num_channels=3 ,
        image_size=18 ,
        min_resolution=30 ,
        max_resolution=400 ,
        do_resize=True ,
        size=None ,
        do_normalize=True ,
    ):
        # Default target size matches the checkpoint's 18x18 expectation.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def _lowercase ( self ):
        """Return the kwargs dict used to construct an ``ImageGPTImageProcessor``."""
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
                    [-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Unit tests for ``ImageGPTImageProcessor``: config attributes, dict/JSON
    round-trips and ``save_pretrained``/``from_pretrained`` persistence.

    NOTE(review): obfuscation artifacts — the mixin base ``_SCREAMING_SNAKE_CASE``
    is unresolved (presumably ``ImageProcessingSavingTestMixin`` imported above),
    ``ImageGPTImageProcessingTester`` is unresolved (presumably the tester class
    defined earlier in this file), and the class attribute plus every test
    method share the single name ``_lowercase``, so later definitions clobber
    earlier ones.  Code left byte-identical; only comments were added.
    """
    # Processor class under test; None when the vision extra is unavailable.
    _lowercase : Dict = ImageGPTImageProcessor if is_vision_available() else None
    def _lowercase ( self ):
        """Create the shared tester fixture."""
        _lowerCAmelCase = ImageGPTImageProcessingTester(self )
    @property
    def _lowercase ( self ):
        """Kwargs dict for constructing the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def _lowercase ( self ):
        """The processor exposes the expected configuration attributes."""
        _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowercase , """clusters""" ) )
        self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
        self.assertTrue(hasattr(_lowercase , """size""" ) )
        self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
    def _lowercase ( self ):
        """``from_dict`` honours the default ``size`` and keyword overrides."""
        _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
    def _lowercase ( self ):
        """``to_json_string`` round-trips every config key (clusters compared as arrays)."""
        _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        _lowerCAmelCase = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(_lowercase , obj[key] ) )
            else:
                self.assertEqual(obj[key] , _lowercase )
    def _lowercase ( self ):
        """JSON-file save/load preserves the full configuration."""
        _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            _lowerCAmelCase = os.path.join(_lowercase , """image_processor.json""" )
            image_processor_first.to_json_file(_lowercase )
            _lowerCAmelCase = self.image_processing_class.from_json_file(_lowercase ).to_dict()
        _lowerCAmelCase = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(_lowercase , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , _lowercase )
    def _lowercase ( self ):
        """``save_pretrained``/``from_pretrained`` preserves the full configuration."""
        _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(_lowercase )
            _lowerCAmelCase = self.image_processing_class.from_pretrained(_lowercase ).to_dict()
        _lowerCAmelCase = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(_lowercase , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , _lowercase )
    @unittest.skip("""ImageGPT requires clusters at initialization""" )
    def _lowercase ( self ):
        """Init-without-clusters test from the mixin is not applicable to ImageGPT."""
        pass
def A ():
    """Open and return the two PIL fixture images used by the slow integration test."""
    fixture_rows = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
    image_a = Image.open(fixture_rows[4]["file"] )
    image_b = Image.open(fixture_rows[5]["file"] )
    return [image_a, image_b]
@require_vision
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow integration test: fixture images are mapped to the expected ImageGPT
    cluster token ids by the pretrained processor.

    NOTE(review): ``prepare_images`` is unresolved here — the fixture loader
    defined above was renamed to ``A`` by the obfuscation pass.
    """
    @slow
    def _lowercase ( self ):
        """Checks ``input_ids`` dtype, shape and known token values for
        single and batched inputs against the ``openai/imagegpt-small`` checkpoint."""
        _lowerCAmelCase = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
        _lowerCAmelCase = prepare_images()
        # test non-batched
        _lowerCAmelCase = image_processing(images[0] , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
        # Expected first three cluster ids of the first image.
        _lowerCAmelCase = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , _lowercase )
        # test batched
        _lowerCAmelCase = image_processing(_lowercase , return_tensors="""pt""" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
        # Expected last three cluster ids of the second image.
        _lowerCAmelCase = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , _lowercase )
| 162 | 1 |
def __UpperCAmelCase( lowercase_ ):
    """Count the distinct ways to climb a staircase of ``lowercase_`` steps,
    taking one or two steps at a time (the classic climbing-stairs problem).

    The original body was corrupted: the validation message referenced the
    undefined name ``SCREAMING_SNAKE_CASE__`` and the tuple-assignment targets
    (``current, previous``) were collapsed into single throwaway names, leaving
    both unbound.  Both are restored here.

    >>> __UpperCAmelCase(3)
    3
    >>> __UpperCAmelCase(4)
    5
    """
    assert (
        isinstance(lowercase_ , int ) and lowercase_ > 0
    ), f"""number_of_steps needs to be positive integer, your input {lowercase_}"""
    if lowercase_ == 1:
        return 1
    # Fibonacci-style rolling pair: ways(n) = ways(n - 1) + ways(n - 2).
    current, previous = 1, 1
    for _ in range(lowercase_ - 1 ):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 114 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_mbart import MBartTokenizer
else:
    # Slow-tokenizer fallback is unavailable without sentencepiece.
    UpperCAmelCase_ : List[str] = None

# NOTE(review): every module-level constant below was renamed to the single
# name ``UpperCAmelCase_`` by the obfuscation pass, so each assignment clobbers
# the previous one and the names the tokenizer class reads (``logger``,
# ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``,
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``, ``FAIRSEQ_LANGUAGE_CODES``)
# are left unresolved.  Code left byte-identical; only comments were added.

# Module logger.
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)

# File names used when saving/loading the tokenizer.
UpperCAmelCase_ : List[Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

# Hub URLs of the pretrained vocab/tokenizer files per checkpoint.
UpperCAmelCase_ : Dict = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
        'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
    },
}

# Maximum input lengths (positional-embedding sizes) per checkpoint.
UpperCAmelCase_ : str = {
    'facebook/mbart-large-en-ro': 1024,
    'facebook/mbart-large-cc25': 1024,
}

# fmt: off
# The 25 fairseq language codes supported by mBART.
UpperCAmelCase_ : List[str] = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowercase__ ( _snake_case ):
    """Fast (tokenizers-backed) mBART tokenizer with language-code handling.

    Sequences are wrapped with per-language prefix/suffix special tokens
    (mBART uses ``<tokens> </s> <lang_code>``), and the backend post-processor
    is re-templated whenever the source/target language changes.

    NOTE(review): obfuscation artifacts — the base name ``_snake_case`` and the
    module constants referenced below are unresolved, every class attribute is
    named ``A_`` (later bindings clobber earlier ones), every method after
    ``__init__`` shares the name ``UpperCAmelCase_``, the ``@src_lang.setter``
    decorator refers to a property that no longer bears that name, and several
    signatures repeat a parameter name (a SyntaxError).  Code left
    byte-identical; only comments/docstrings were added.
    """
    A_ : Any = VOCAB_FILES_NAMES
    A_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A_ : Any = PRETRAINED_VOCAB_FILES_MAP
    A_ : Tuple = ["""input_ids""", """attention_mask"""]
    A_ : Dict = MBartTokenizer
    # Token-id lists wrapped around every sequence; populated per language below.
    A_ : List[int] = []
    A_ : List[int] = []
    def __init__( self , __snake_case=None , __snake_case=None , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case=None , __snake_case=None , __snake_case=None , **__snake_case , ):
        # Mask token behave like a normal word, i.e. include the space before it
        _SCREAMING_SNAKE_CASE : List[Any] = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
        super().__init__(
            vocab_file=__snake_case , tokenizer_file=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , **__snake_case , )
        _SCREAMING_SNAKE_CASE : List[str] = vocab_file
        # The slow tokenizer can only be saved when the sentencepiece model file is known.
        _SCREAMING_SNAKE_CASE : Optional[int] = False if not self.vocab_file else True
        _SCREAMING_SNAKE_CASE : int = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
            self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
        # Map every fairseq language code to its token id in this vocab.
        _SCREAMING_SNAKE_CASE : str = {
            lang_code: self.convert_tokens_to_ids(__snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        _SCREAMING_SNAKE_CASE : Optional[Any] = src_lang if src_lang is not None else """en_XX"""
        _SCREAMING_SNAKE_CASE : str = self.convert_tokens_to_ids(self._src_lang )
        _SCREAMING_SNAKE_CASE : Any = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def UpperCAmelCase_ ( self ):
        """Current source-language code."""
        return self._src_lang
    @src_lang.setter
    def UpperCAmelCase_ ( self , __snake_case ):
        """Set the source language and refresh the special tokens accordingly."""
        _SCREAMING_SNAKE_CASE : Any = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def UpperCAmelCase_ ( self , __snake_case , __snake_case = None ):
        """Build model inputs by wrapping the language prefix/suffix tokens around the sequence(s)."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def UpperCAmelCase_ ( self , __snake_case , __snake_case = None ):
        """Return an all-zero token-type-id mask (mBART does not use token types)."""
        _SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
        _SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def UpperCAmelCase_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , **__snake_case ):
        """Tokenize translation inputs and attach the forced BOS id for the target language."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        _SCREAMING_SNAKE_CASE : Dict = src_lang
        _SCREAMING_SNAKE_CASE : List[Any] = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
        _SCREAMING_SNAKE_CASE : Dict = self.convert_tokens_to_ids(__snake_case )
        _SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang_id
        return inputs
    def UpperCAmelCase_ ( self , __snake_case , __snake_case = "en_XX" , __snake_case = None , __snake_case = "ro_RO" , **__snake_case , ):
        """Prepare a seq2seq batch with the given source/target languages."""
        _SCREAMING_SNAKE_CASE : Optional[Any] = src_lang
        _SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang
        return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
    def UpperCAmelCase_ ( self ):
        """Switch into source-language (input) mode."""
        return self.set_src_lang_special_tokens(self.src_lang )
    def UpperCAmelCase_ ( self ):
        """Switch into target-language mode."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def UpperCAmelCase_ ( self , __snake_case ):
        """Reset special tokens for the source language: no prefix, suffix = [eos, src_lang_code]."""
        _SCREAMING_SNAKE_CASE : List[str] = self.convert_tokens_to_ids(__snake_case )
        _SCREAMING_SNAKE_CASE : str = []
        _SCREAMING_SNAKE_CASE : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
        _SCREAMING_SNAKE_CASE : int = self.convert_ids_to_tokens(self.prefix_tokens )
        _SCREAMING_SNAKE_CASE : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
        # Re-template the backend post-processor so the fast tokenizer emits the new specials.
        _SCREAMING_SNAKE_CASE : Optional[int] = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def UpperCAmelCase_ ( self , __snake_case ):
        """Reset special tokens for the target language: no prefix, suffix = [eos, tgt_lang_code]."""
        _SCREAMING_SNAKE_CASE : List[str] = self.convert_tokens_to_ids(__snake_case )
        _SCREAMING_SNAKE_CASE : List[str] = []
        _SCREAMING_SNAKE_CASE : Tuple = [self.eos_token_id, self.cur_lang_code]
        _SCREAMING_SNAKE_CASE : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        _SCREAMING_SNAKE_CASE : str = self.convert_ids_to_tokens(self.suffix_tokens )
        _SCREAMING_SNAKE_CASE : Union[str, Any] = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def UpperCAmelCase_ ( self , __snake_case , __snake_case = None ):
        """Copy the sentencepiece model file into ``save_directory`` (slow-tokenizer vocabulary)."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(__snake_case ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        _SCREAMING_SNAKE_CASE : List[str] = os.path.join(
            __snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
            copyfile(self.vocab_file , __snake_case )
        return (out_vocab_file,)
| 533 | 0 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class A_ ( __lowerCamelCase ):
    """Dataset reader for plain-text files, wrapping the packaged ``Text`` builder.

    NOTE(review): obfuscation artifacts — the base name ``__lowerCamelCase`` is
    unresolved (presumably ``AbstractDatasetReader`` imported above),
    ``__init__`` repeats the parameter name ``snake_case`` (a SyntaxError), and
    the bodies reference the intended names (``path_or_paths``, ``self.builder``
    …) that are never bound here.  Code left byte-identical; only comments
    were added.
    """
    def __init__( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = False , snake_case = False , snake_case = None , **snake_case , ):
        super().__init__(
            snake_case , split=snake_case , features=snake_case , cache_dir=snake_case , keep_in_memory=snake_case , streaming=snake_case , num_proc=snake_case , **snake_case , )
        # Normalize a single path (or list of paths) into a split -> paths mapping.
        lowercase = path_or_paths if isinstance(snake_case , snake_case ) else {self.split: path_or_paths}
        lowercase = Text(
            cache_dir=snake_case , data_files=snake_case , features=snake_case , **snake_case , )
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Materialize the dataset: streaming mode yields an iterable dataset,
        otherwise the builder downloads/prepares and returns a map-style dataset."""
        # Build iterable dataset
        if self.streaming:
            lowercase = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            lowercase = None
            lowercase = None
            lowercase = None
            lowercase = None
            self.builder.download_and_prepare(
                download_config=snake_case , download_mode=snake_case , verification_mode=snake_case , base_path=snake_case , num_proc=self.num_proc , )
            lowercase = self.builder.as_dataset(
                split=self.split , verification_mode=snake_case , in_memory=self.keep_in_memory )
        return dataset
| 565 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the timm-backbone integration.
# NOTE(review): obfuscation artifacts — the import-structure dict was renamed
# to ``UpperCAmelCase`` while the final ``_LazyModule(...)`` call still
# references the undefined name ``_import_structure``, and the torch-only
# branch rebinds ``UpperCAmelCase`` to a list instead of adding a
# ``modeling_timm_backbone`` key.  Code left byte-identical; only comments
# were added.
UpperCAmelCase = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase = ['''TimmBackbone''']

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 565 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase__ ( A_ ):
    """Composite processor pairing an auto image processor with an auto tokenizer.

    NOTE(review): obfuscation artifacts — the base ``A_`` is unresolved
    (``ProcessorMixin`` is imported above), ``__init__`` and ``__call__`` repeat
    their parameter names (duplicate argument names are a SyntaxError), and the
    bodies read the intended names (``text``, ``images``, ``encoding``,
    ``image_features``) that are never bound here.  Code left byte-identical;
    only comments/docstrings were added.
    """
    __UpperCAmelCase = ['''image_processor''', '''tokenizer''']
    __UpperCAmelCase = '''AutoImageProcessor'''
    __UpperCAmelCase = '''AutoTokenizer'''
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str:
        super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
        # NOTE(review): assigned to a throwaway local — presumably this was
        # ``self.current_processor = self.image_processor`` upstream.
        _lowerCamelCase : Optional[int] = self.image_processor
    def __call__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE) -> str:
        """Dispatch to the tokenizer and/or image processor and merge the outputs."""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""")
        if text is not None:
            _lowerCamelCase : str = self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
        if images is not None:
            _lowerCamelCase : Dict = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
        if text is not None and images is not None:
            # Attach pixel values to the text encoding when both modalities are given.
            _lowerCamelCase : str = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE) , tensor_type=SCREAMING_SNAKE_CASE)
    def UpperCamelCase_ ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> List[str]:
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
    def UpperCamelCase_ ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> Tuple:
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
    @property
    def UpperCamelCase_ ( self) -> int:
        """Names of the tensor inputs produced by this processor."""
        return ["input_ids", "attention_mask", "pixel_values"]
| 88 |
'''simple docstring'''
from torch import nn
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
    """Return a fresh ``torch.nn`` activation module for the given name.

    Fixes the original body, which read ``act_fn`` — a name that was never
    bound (the obfuscated parameter is ``SCREAMING_SNAKE_CASE__``) — so every
    call raised ``NameError``.

    Args:
        SCREAMING_SNAKE_CASE__: activation name; one of ``"swish"``/``"silu"``
            (aliases for SiLU), ``"mish"`` or ``"gelu"``.

    Raises:
        ValueError: for any unsupported activation name.
    """
    # Bind the obfuscated positional parameter to the name the body/error message uses.
    act_fn = SCREAMING_SNAKE_CASE__
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''' )
| 672 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
_A = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class lowerCamelCase (PipelineTool ):
    """Text-translation tool backed by NLLB-200 (distilled 600M).

    Fixes several corruption artifacts in the original block: the base class
    was the unresolved name ``_SCREAMING_SNAKE_CASE`` (restored to
    ``PipelineTool``, imported at the top of this file); every class attribute
    was bound to the single name ``a`` (each rebinding clobbering the previous
    one) and ``lang_to_code`` referenced the undefined ``LANGUAGE_CODES``
    (the dict defined above is ``_A``); all three methods shared one name and
    their bodies read parameters (``src_lang``, ``tgt_lang``, ``outputs``)
    that were never bound.  The standard ``PipelineTool`` attribute and method
    names (``encode``/``forward``/``decode``) are restored.
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    # Plain-English language name -> NLLB language code (dict defined above).
    lang_to_code = _A
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode( self , text , src_lang , tgt_lang ):
        """Tokenize `text` with source/target language codes set.

        Raises:
            ValueError: if either language name is not in ``lang_to_code``.
        """
        if src_lang not in self.lang_to_code:
            raise ValueError(F"""{src_lang} is not a supported language.""" )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F"""{tgt_lang} is not a supported language.""" )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="pt" , src_lang=src_lang , tgt_lang=tgt_lang )

    def forward( self , inputs ):
        """Run seq2seq generation on the tokenized inputs."""
        return self.model.generate(**inputs )

    def decode( self , outputs ):
        """Decode the first generated sequence back to text."""
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=_snake_case )
| 538 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding for the vision-encoder-decoder models.
# NOTE(review): obfuscation artifacts — each backend branch below REBINDS
# ``_A`` to a plain list (upstream these add keys such as
# ``modeling_vision_encoder_decoder`` to the structure dict), and the final
# ``_LazyModule(...)`` call references the undefined name
# ``_import_structure``.  Code left byte-identical; only comments were added.
_A = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = ['VisionEncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = ['TFVisionEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = ['FlaxVisionEncoderDecoderModel']

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 538 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.